author     Anton Arapov <anton@redhat.com>  2012-04-16 10:05:28 +0200
committer  Anton Arapov <anton@redhat.com>  2012-04-16 10:05:28 +0200
commit     b4b6116a13633898cf868f2f103c96a90c4c20f8 (patch)
tree       93d1b7e2cfcdf473d8d4ff3ad141fa864f8491f6 /arch/frv
parent     edd4be777c953e5faafc80d091d3084b4343f5d3 (diff)
fedora kernel: d9aad82f3319f3cfd1aebc01234254ef0c37ad84 v3.3.2-1
Signed-off-by: Anton Arapov <anton@redhat.com>
Diffstat (limited to 'arch/frv')
-rw-r--r--  arch/frv/Kconfig | 375
-rw-r--r--  arch/frv/Kconfig.debug | 52
-rw-r--r--  arch/frv/Makefile | 97
-rw-r--r--  arch/frv/boot/Makefile | 74
-rw-r--r--  arch/frv/defconfig | 39
-rw-r--r--  arch/frv/include/asm/Kbuild | 4
-rw-r--r--  arch/frv/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/frv/include/asm/atomic.h | 260
-rw-r--r--  arch/frv/include/asm/auxvec.h | 4
-rw-r--r--  arch/frv/include/asm/ax88796.h | 22
-rw-r--r--  arch/frv/include/asm/bitops.h | 410
-rw-r--r--  arch/frv/include/asm/bitsperlong.h | 1
-rw-r--r--  arch/frv/include/asm/bug.h | 54
-rw-r--r--  arch/frv/include/asm/bugs.h | 14
-rw-r--r--  arch/frv/include/asm/busctl-regs.h | 41
-rw-r--r--  arch/frv/include/asm/byteorder.h | 6
-rw-r--r--  arch/frv/include/asm/cache.h | 23
-rw-r--r--  arch/frv/include/asm/cacheflush.h | 105
-rw-r--r--  arch/frv/include/asm/checksum.h | 180
-rw-r--r--  arch/frv/include/asm/cpu-irqs.h | 81
-rw-r--r--  arch/frv/include/asm/cpumask.h | 6
-rw-r--r--  arch/frv/include/asm/cputime.h | 6
-rw-r--r--  arch/frv/include/asm/current.h | 30
-rw-r--r--  arch/frv/include/asm/delay.h | 50
-rw-r--r--  arch/frv/include/asm/device.h | 7
-rw-r--r--  arch/frv/include/asm/div64.h | 1
-rw-r--r--  arch/frv/include/asm/dm9000.h | 37
-rw-r--r--  arch/frv/include/asm/dma-mapping.h | 135
-rw-r--r--  arch/frv/include/asm/dma.h | 125
-rw-r--r--  arch/frv/include/asm/elf.h | 142
-rw-r--r--  arch/frv/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/frv/include/asm/errno.h | 7
-rw-r--r--  arch/frv/include/asm/fb.h | 12
-rw-r--r--  arch/frv/include/asm/fcntl.h | 1
-rw-r--r--  arch/frv/include/asm/fpu.h | 11
-rw-r--r--  arch/frv/include/asm/ftrace.h | 1
-rw-r--r--  arch/frv/include/asm/futex.h | 20
-rw-r--r--  arch/frv/include/asm/gdb-stub.h | 146
-rw-r--r--  arch/frv/include/asm/gpio-regs.h | 116
-rw-r--r--  arch/frv/include/asm/hardirq.h | 26
-rw-r--r--  arch/frv/include/asm/highmem.h | 167
-rw-r--r--  arch/frv/include/asm/hw_irq.h | 16
-rw-r--r--  arch/frv/include/asm/io.h | 392
-rw-r--r--  arch/frv/include/asm/ioctl.h | 1
-rw-r--r--  arch/frv/include/asm/ioctls.h | 10
-rw-r--r--  arch/frv/include/asm/ipcbuf.h | 1
-rw-r--r--  arch/frv/include/asm/irc-regs.h | 53
-rw-r--r--  arch/frv/include/asm/irq.h | 30
-rw-r--r--  arch/frv/include/asm/irq_regs.h | 27
-rw-r--r--  arch/frv/include/asm/irqflags.h | 158
-rw-r--r--  arch/frv/include/asm/kdebug.h | 1
-rw-r--r--  arch/frv/include/asm/kmap_types.h | 29
-rw-r--r--  arch/frv/include/asm/linkage.h | 7
-rw-r--r--  arch/frv/include/asm/local.h | 6
-rw-r--r--  arch/frv/include/asm/local64.h | 1
-rw-r--r--  arch/frv/include/asm/math-emu.h | 301
-rw-r--r--  arch/frv/include/asm/mb-regs.h | 200
-rw-r--r--  arch/frv/include/asm/mb86943a.h | 42
-rw-r--r--  arch/frv/include/asm/mb93091-fpga-irqs.h | 42
-rw-r--r--  arch/frv/include/asm/mb93093-fpga-irqs.h | 29
-rw-r--r--  arch/frv/include/asm/mb93493-irqs.h | 50
-rw-r--r--  arch/frv/include/asm/mb93493-regs.h | 281
-rw-r--r--  arch/frv/include/asm/mc146818rtc.h | 16
-rw-r--r--  arch/frv/include/asm/mem-layout.h | 86
-rw-r--r--  arch/frv/include/asm/mman.h | 1
-rw-r--r--  arch/frv/include/asm/mmu.h | 41
-rw-r--r--  arch/frv/include/asm/mmu_context.h | 50
-rw-r--r--  arch/frv/include/asm/module.h | 28
-rw-r--r--  arch/frv/include/asm/msgbuf.h | 32
-rw-r--r--  arch/frv/include/asm/mutex.h | 9
-rw-r--r--  arch/frv/include/asm/page.h | 76
-rw-r--r--  arch/frv/include/asm/param.h | 8
-rw-r--r--  arch/frv/include/asm/pci.h | 63
-rw-r--r--  arch/frv/include/asm/percpu.h | 6
-rw-r--r--  arch/frv/include/asm/perf_event.h | 17
-rw-r--r--  arch/frv/include/asm/pgalloc.h | 69
-rw-r--r--  arch/frv/include/asm/pgtable.h | 544
-rw-r--r--  arch/frv/include/asm/poll.h | 12
-rw-r--r--  arch/frv/include/asm/posix_types.h | 62
-rw-r--r--  arch/frv/include/asm/processor.h | 152
-rw-r--r--  arch/frv/include/asm/ptrace.h | 89
-rw-r--r--  arch/frv/include/asm/registers.h | 232
-rw-r--r--  arch/frv/include/asm/resource.h | 7
-rw-r--r--  arch/frv/include/asm/scatterlist.h | 6
-rw-r--r--  arch/frv/include/asm/sections.h | 46
-rw-r--r--  arch/frv/include/asm/segment.h | 45
-rw-r--r--  arch/frv/include/asm/sembuf.h | 26
-rw-r--r--  arch/frv/include/asm/serial-regs.h | 44
-rw-r--r--  arch/frv/include/asm/serial.h | 18
-rw-r--r--  arch/frv/include/asm/setup.h | 31
-rw-r--r--  arch/frv/include/asm/shmbuf.h | 43
-rw-r--r--  arch/frv/include/asm/shmparam.h | 7
-rw-r--r--  arch/frv/include/asm/sigcontext.h | 26
-rw-r--r--  arch/frv/include/asm/siginfo.h | 12
-rw-r--r--  arch/frv/include/asm/signal.h | 44
-rw-r--r--  arch/frv/include/asm/smp.h | 9
-rw-r--r--  arch/frv/include/asm/socket.h | 69
-rw-r--r--  arch/frv/include/asm/sockios.h | 14
-rw-r--r--  arch/frv/include/asm/spinlock.h | 17
-rw-r--r--  arch/frv/include/asm/spr-regs.h | 416
-rw-r--r--  arch/frv/include/asm/stat.h | 100
-rw-r--r--  arch/frv/include/asm/statfs.h | 7
-rw-r--r--  arch/frv/include/asm/string.h | 51
-rw-r--r--  arch/frv/include/asm/swab.h | 10
-rw-r--r--  arch/frv/include/asm/syscall.h | 123
-rw-r--r--  arch/frv/include/asm/system.h | 158
-rw-r--r--  arch/frv/include/asm/termbits.h | 203
-rw-r--r--  arch/frv/include/asm/termios.h | 58
-rw-r--r--  arch/frv/include/asm/thread_info.h | 137
-rw-r--r--  arch/frv/include/asm/timer-regs.h | 106
-rw-r--r--  arch/frv/include/asm/timex.h | 20
-rw-r--r--  arch/frv/include/asm/tlb.h | 27
-rw-r--r--  arch/frv/include/asm/tlbflush.h | 73
-rw-r--r--  arch/frv/include/asm/topology.h | 12
-rw-r--r--  arch/frv/include/asm/types.h | 26
-rw-r--r--  arch/frv/include/asm/uaccess.h | 319
-rw-r--r--  arch/frv/include/asm/ucontext.h | 12
-rw-r--r--  arch/frv/include/asm/unaligned.h | 22
-rw-r--r--  arch/frv/include/asm/unistd.h | 388
-rw-r--r--  arch/frv/include/asm/user.h | 80
-rw-r--r--  arch/frv/include/asm/vga.h | 17
-rw-r--r--  arch/frv/include/asm/virtconvert.h | 41
-rw-r--r--  arch/frv/include/asm/xor.h | 1
-rw-r--r--  arch/frv/kernel/Makefile | 23
-rw-r--r--  arch/frv/kernel/asm-offsets.c | 108
-rw-r--r--  arch/frv/kernel/break.S | 792
-rw-r--r--  arch/frv/kernel/cmode.S | 189
-rw-r--r--  arch/frv/kernel/debug-stub.c | 259
-rw-r--r--  arch/frv/kernel/dma.c | 463
-rw-r--r--  arch/frv/kernel/entry-table.S | 329
-rw-r--r--  arch/frv/kernel/entry.S | 1531
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 114
-rw-r--r--  arch/frv/kernel/futex.c | 242
-rw-r--r--  arch/frv/kernel/gdb-io.c | 216
-rw-r--r--  arch/frv/kernel/gdb-io.h | 55
-rw-r--r--  arch/frv/kernel/gdb-stub.c | 2150
-rw-r--r--  arch/frv/kernel/head-mmu-fr451.S | 374
-rw-r--r--  arch/frv/kernel/head-uc-fr401.S | 311
-rw-r--r--  arch/frv/kernel/head-uc-fr451.S | 174
-rw-r--r--  arch/frv/kernel/head-uc-fr555.S | 347
-rw-r--r--  arch/frv/kernel/head.S | 643
-rw-r--r--  arch/frv/kernel/head.inc | 50
-rw-r--r--  arch/frv/kernel/init_task.c | 32
-rw-r--r--  arch/frv/kernel/irq-mb93091.c | 158
-rw-r--r--  arch/frv/kernel/irq-mb93093.c | 131
-rw-r--r--  arch/frv/kernel/irq-mb93493.c | 148
-rw-r--r--  arch/frv/kernel/irq.c | 160
-rw-r--r--  arch/frv/kernel/kernel_execve.S | 33
-rw-r--r--  arch/frv/kernel/kernel_thread.S | 77
-rw-r--r--  arch/frv/kernel/local.h | 59
-rw-r--r--  arch/frv/kernel/local64.h | 1
-rw-r--r--  arch/frv/kernel/module.c | 27
-rw-r--r--  arch/frv/kernel/pm-mb93093.c | 65
-rw-r--r--  arch/frv/kernel/pm.c | 353
-rw-r--r--  arch/frv/kernel/process.c | 378
-rw-r--r--  arch/frv/kernel/ptrace.c | 378
-rw-r--r--  arch/frv/kernel/setup.c | 1179
-rw-r--r--  arch/frv/kernel/signal.c | 590
-rw-r--r--  arch/frv/kernel/sleep.S | 373
-rw-r--r--  arch/frv/kernel/switch_to.S | 489
-rw-r--r--  arch/frv/kernel/sys_frv.c | 44
-rw-r--r--  arch/frv/kernel/sysctl.c | 221
-rw-r--r--  arch/frv/kernel/time.c | 123
-rw-r--r--  arch/frv/kernel/traps.c | 659
-rw-r--r--  arch/frv/kernel/uaccess.c | 100
-rw-r--r--  arch/frv/kernel/vmlinux.lds.S | 132
-rw-r--r--  arch/frv/lib/Makefile | 8
-rw-r--r--  arch/frv/lib/__ashldi3.S | 40
-rw-r--r--  arch/frv/lib/__ashrdi3.S | 41
-rw-r--r--  arch/frv/lib/__lshrdi3.S | 40
-rw-r--r--  arch/frv/lib/__muldi3.S | 32
-rw-r--r--  arch/frv/lib/__negdi2.S | 28
-rw-r--r--  arch/frv/lib/__ucmpdi2.S | 45
-rw-r--r--  arch/frv/lib/atomic-ops.S | 172
-rw-r--r--  arch/frv/lib/atomic64-ops.S | 162
-rw-r--r--  arch/frv/lib/cache.S | 98
-rw-r--r--  arch/frv/lib/checksum.c | 166
-rw-r--r--  arch/frv/lib/insl_ns.S | 52
-rw-r--r--  arch/frv/lib/insl_sw.S | 40
-rw-r--r--  arch/frv/lib/memcpy.S | 135
-rw-r--r--  arch/frv/lib/memset.S | 182
-rw-r--r--  arch/frv/lib/outsl_ns.S | 59
-rw-r--r--  arch/frv/lib/outsl_sw.S | 45
-rw-r--r--  arch/frv/mb93090-mb00/Makefile | 15
-rw-r--r--  arch/frv/mb93090-mb00/flash.c | 90
-rw-r--r--  arch/frv/mb93090-mb00/pci-dma-nommu.c | 145
-rw-r--r--  arch/frv/mb93090-mb00/pci-dma.c | 90
-rw-r--r--  arch/frv/mb93090-mb00/pci-frv.c | 196
-rw-r--r--  arch/frv/mb93090-mb00/pci-frv.h | 42
-rw-r--r--  arch/frv/mb93090-mb00/pci-irq.c | 65
-rw-r--r--  arch/frv/mb93090-mb00/pci-vdk.c | 455
-rw-r--r--  arch/frv/mm/Makefile | 9
-rw-r--r--  arch/frv/mm/cache-page.c | 71
-rw-r--r--  arch/frv/mm/dma-alloc.c | 183
-rw-r--r--  arch/frv/mm/elf-fdpic.c | 128
-rw-r--r--  arch/frv/mm/extable.c | 74
-rw-r--r--  arch/frv/mm/fault.c | 326
-rw-r--r--  arch/frv/mm/highmem.c | 89
-rw-r--r--  arch/frv/mm/init.c | 207
-rw-r--r--  arch/frv/mm/kmap.c | 52
-rw-r--r--  arch/frv/mm/mmu-context.c | 208
-rw-r--r--  arch/frv/mm/pgalloc.c | 153
-rw-r--r--  arch/frv/mm/tlb-flush.S | 184
-rw-r--r--  arch/frv/mm/tlb-miss.S | 629
204 files changed, 28072 insertions, 0 deletions
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
new file mode 100644
index 00000000000..a685910d2d5
--- /dev/null
+++ b/arch/frv/Kconfig
@@ -0,0 +1,375 @@
+config FRV
+ bool
+ default y
+ select HAVE_IDE
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_IRQ_WORK
+ select HAVE_PERF_EVENTS
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select GENERIC_CPU_DEVICES
+
+config ZONE_DMA
+ bool
+ default y
+
+config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config GENERIC_HWEIGHT
+ bool
+ default y
+
+config GENERIC_CALIBRATE_DELAY
+ bool
+ default n
+
+config TIME_LOW_RES
+ bool
+ default y
+
+config QUICKLIST
+ bool
+ default y
+
+config ARCH_HAS_ILOG2_U32
+ bool
+ default y
+
+config ARCH_HAS_ILOG2_U64
+ bool
+ default y
+
+config HZ
+ int
+ default 1000
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+
+menu "Fujitsu FR-V system setup"
+
+config MMU
+ bool "MMU support"
+ help
+ This option switches on and off support for the FR-V MMU
+ (effectively switching between vmlinux and uClinux). Not all FR-V
+ CPUs support this. Currently only the FR451 has a sufficiently
+ featured MMU.
+
+config FRV_OUTOFLINE_ATOMIC_OPS
+ bool "Out-of-line the FRV atomic operations"
+ default n
+ help
+ Setting this option causes the FR-V atomic operations to be mostly
+ implemented out-of-line.
+
+ See Documentation/frv/atomic-ops.txt for more information.
+
+config HIGHMEM
+ bool "High memory support"
+ depends on MMU
+ default y
+ help
+ If you wish to use more than 256MB of memory with your MMU-based
+ system, you will need to select this option. The kernel can only see
+ the memory between 0xC0000000 and 0xD0000000 directly... everything
+ else must be kmapped.
+
+ The arch is, however, capable of supporting up to 3GB of SDRAM.
+
+config HIGHPTE
+ bool "Allocate page tables in highmem"
+ depends on HIGHMEM
+ default y
+ help
+ The VM uses one page of memory for each page table. For systems
+ with a lot of RAM, this can be wasteful of precious low memory.
+ Setting this option will put user-space page tables in high memory.
+
+source "mm/Kconfig"
+
+choice
+ prompt "uClinux kernel load address"
+ depends on !MMU
+ default UCPAGE_OFFSET_C0000000
+ help
+ This option sets the base address for the uClinux kernel. The kernel
+ will rearrange the SDRAM layout to start at this address, and move
+ itself to start there. It must be greater than 0, and it must be
+ sufficiently less than 0xE0000000 that the SDRAM does not intersect
+ the I/O region.
+
+ The base address must also be aligned such that the SDRAM controller
+ can decode it. For instance, a 512MB SDRAM bank must be 512MB aligned.
+
+config UCPAGE_OFFSET_20000000
+ bool "0x20000000"
+
+config UCPAGE_OFFSET_40000000
+ bool "0x40000000"
+
+config UCPAGE_OFFSET_60000000
+ bool "0x60000000"
+
+config UCPAGE_OFFSET_80000000
+ bool "0x80000000"
+
+config UCPAGE_OFFSET_A0000000
+ bool "0xA0000000"
+
+config UCPAGE_OFFSET_C0000000
+ bool "0xC0000000 (Recommended)"
+
+endchoice
+
+config PAGE_OFFSET
+ hex
+ default 0x20000000 if UCPAGE_OFFSET_20000000
+ default 0x40000000 if UCPAGE_OFFSET_40000000
+ default 0x60000000 if UCPAGE_OFFSET_60000000
+ default 0x80000000 if UCPAGE_OFFSET_80000000
+ default 0xA0000000 if UCPAGE_OFFSET_A0000000
+ default 0xC0000000
+
+config PROTECT_KERNEL
+ bool "Protect core kernel against userspace"
+ depends on !MMU
+ default y
+ help
+ Selecting this option causes the uClinux kernel to change the
+ permissions in the DAMPR register covering the core kernel image to
+ prevent userspace from accessing the underlying memory directly.
+
+choice
+ prompt "CPU Caching mode"
+ default FRV_DEFL_CACHE_WBACK
+ help
+ This option determines the default caching mode for the kernel.
+
+ In Write-Back caching mode, all reads and writes cause the affected
+ cacheline to be read into the cache before being operated upon.
+ Memory is not updated by a write until the cache is full and a
+ cacheline needs to be displaced from the cache to make room; only at
+ that point is it written back.
+
+ Write-Behind caching is similar to Write-Back caching, except that a
+ write won't fetch a cacheline into the cache if there isn't already
+ one there; it will write directly to memory instead.
+
+ Write-Through caching only fetches cachelines from memory on a
+ read. Writes always get written directly to memory. If the affected
+ cacheline is also in cache, it will be updated too.
+
+ The final option is to turn off caching entirely.
+
+ Note that not all CPUs support Write-Behind caching. If the CPU on
+ which the kernel is running doesn't, it'll fall back to Write-Back
+ caching.
+
+config FRV_DEFL_CACHE_WBACK
+ bool "Write-Back"
+
+config FRV_DEFL_CACHE_WBEHIND
+ bool "Write-Behind"
+
+config FRV_DEFL_CACHE_WTHRU
+ bool "Write-Through"
+
+config FRV_DEFL_CACHE_DISABLED
+ bool "Disabled"
+
+endchoice
+
+menu "CPU core support"
+
+config CPU_FR401
+ bool "Include FR401 core support"
+ depends on !MMU
+ default y
+ help
+ This enables support for the FR401, FR401A and FR403 CPUs
+
+config CPU_FR405
+ bool "Include FR405 core support"
+ depends on !MMU
+ default y
+ help
+ This enables support for the FR405 CPU
+
+config CPU_FR451
+ bool "Include FR451 core support"
+ default y
+ help
+ This enables support for the FR451 CPU
+
+config CPU_FR451_COMPILE
+ bool "Specifically compile for FR451 core"
+ depends on CPU_FR451 && !CPU_FR401 && !CPU_FR405 && !CPU_FR551
+ default y
+ help
+ This causes appropriate flags to be passed to the compiler to
+ optimise for the FR451 CPU
+
+config CPU_FR551
+ bool "Include FR551 core support"
+ depends on !MMU
+ default y
+ help
+ This enables support for the FR555 CPU
+
+config CPU_FR551_COMPILE
+ bool "Specifically compile for FR551 core"
+ depends on CPU_FR551 && !CPU_FR401 && !CPU_FR405 && !CPU_FR451
+ default y
+ help
+ This causes appropriate flags to be passed to the compiler to
+ optimise for the FR555 CPU
+
+config FRV_L1_CACHE_SHIFT
+ int
+ default "5" if CPU_FR401 || CPU_FR405 || CPU_FR451
+ default "6" if CPU_FR551
+
+endmenu
+
+choice
+ prompt "System support"
+ default MB93091_VDK
+
+config MB93091_VDK
+ bool "MB93091 CPU board with or without motherboard"
+
+config MB93093_PDK
+ bool "MB93093 PDK unit"
+
+endchoice
+
+if MB93091_VDK
+choice
+ prompt "Motherboard support"
+ default MB93090_MB00
+
+config MB93090_MB00
+ bool "Use the MB93090-MB00 motherboard"
+ help
+ Select this option if the MB93091 CPU board is going to be used with
+ a MB93090-MB00 VDK motherboard
+
+config MB93091_NO_MB
+ bool "Use standalone"
+ help
+ Select this option if the MB93091 CPU board is going to be used
+ without a motherboard
+
+endchoice
+endif
+
+config FUJITSU_MB93493
+ bool "MB93493 Multimedia chip"
+ help
+ Select this option if the MB93493 multimedia chip is going to be
+ used.
+
+choice
+ prompt "GP-Relative data support"
+ default GPREL_DATA_8
+ help
+ This option controls what data, if any, should be placed in the
+ GP-relative data sections. Using this means that the compiler can
+ generate accesses to the data using GR16-relative addressing, which
+ is faster than using absolute addresses and saves space (two
+ instructions per access).
+
+ However, the GPREL region is limited in size because the immediate
+ value used in the load and store instructions is limited to a 12-bit
+ signed number.
+
+ So if the linker starts complaining that accesses to GPREL data are
+ out of range, try changing this option from the default.
+
+ Note that modules will always be compiled with this feature disabled
+ as the module data will not be in range of the GP base address.
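+
+ For reference, these choices correspond to the -G8, -G4 and -G0
+ compiler flags set via KBUILD_CFLAGS in arch/frv/Makefile.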
+
+config GPREL_DATA_8
+ bool "Put data objects of up to 8 bytes into GP-REL"
+
+config GPREL_DATA_4
+ bool "Put data objects of up to 4 bytes into GP-REL"
+
+config GPREL_DATA_NONE
+ bool "Don't use GP-REL"
+
+endchoice
+
+config FRV_ONCPU_SERIAL
+ bool "Use on-CPU serial ports"
+ select SERIAL_8250
+ default y
+
+config PCI
+ bool "Use PCI"
+ depends on MB93090_MB00
+ default y
+ select GENERIC_PCI_IOMAP
+ help
+ Some FR-V systems (such as the MB93090-MB00 VDK) have PCI
+ onboard. If you have one of these boards and you wish to use the PCI
+ facilities, say Y here.
+
+config RESERVE_DMA_COHERENT
+ bool "Reserve DMA coherent memory"
+ depends on PCI && !MMU
+ default y
+ help
+ Many PCI drivers require access to uncached memory for DMA device
+ communications (such as is done with some Ethernet buffer rings). If
+ a fully featured MMU is available, this can be done through page
+ table settings, but if not, a region has to be set aside and marked
+ with a special DAMPR register.
+
+ Setting this option causes uClinux to set aside a portion of the
+ available memory for use in this manner. The memory will then be
+ unavailable for normal kernel use.
+
+source "drivers/pci/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+menu "Power management options"
+
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
+source kernel/power/Kconfig
+endmenu
+
+endmenu
+
+
+menu "Executable formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/frv/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/frv/Kconfig.debug b/arch/frv/Kconfig.debug
new file mode 100644
index 00000000000..211f01bc4ca
--- /dev/null
+++ b/arch/frv/Kconfig.debug
@@ -0,0 +1,52 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+
+config GDBSTUB
+ bool "Remote GDB kernel debugging"
+ depends on DEBUG_KERNEL
+ select DEBUG_INFO
+ select FRAME_POINTER
+ help
+ If you say Y here, it will be possible to remotely debug the kernel
+ using gdb. This enlarges your kernel ELF image's disk size by several
+ megabytes and requires a machine with more than 16 MB (preferably 32 MB)
+ of RAM to avoid excessive linking time. This is only useful for kernel
+ hackers. If unsure, say N.
+
+choice
+ prompt "GDB stub port"
+ default GDBSTUB_UART1
+ depends on GDBSTUB
+ help
+ Select the on-CPU port used for GDB-stub
+
+config GDBSTUB_UART0
+ bool "/dev/ttyS0"
+
+config GDBSTUB_UART1
+ bool "/dev/ttyS1"
+
+endchoice
+
+config GDBSTUB_IMMEDIATE
+ bool "Break into GDB stub immediately"
+ depends on GDBSTUB
+ help
+ If you say Y here, GDB stub will break into the program as soon as
+ possible, leaving the program counter at the beginning of
+ start_kernel() in init/main.c.
+
+config GDB_CONSOLE
+ bool "Console output to GDB"
+ depends on GDBSTUB
+ help
+ If you are using GDB for remote debugging over a serial port and
+ would like kernel messages to be formatted into GDB $O packets so
+ that GDB prints them as program output, say 'Y'.
+
+endmenu
diff --git a/arch/frv/Makefile b/arch/frv/Makefile
new file mode 100644
index 00000000000..7ff84575b18
--- /dev/null
+++ b/arch/frv/Makefile
@@ -0,0 +1,97 @@
+#
+# frv/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 2003, 2004 Red Hat Inc.
+# - Written by David Howells <dhowells@redhat.com>
+# - Derived from arch/m68knommu/Makefile,
+# Copyright (c) 1999,2001 D. Jeff Dionne <jeff@lineo.ca>,
+# Rt-Control Inc. / Lineo, Inc.
+#
+# Copyright (C) 1998,1999 D. Jeff Dionne <jeff@uclinux.org>,
+# Kenneth Albanowski <kjahds@kjahds.com>,
+#
+# Based on arch/m68k/Makefile:
+# Copyright (C) 1994 by Hamish Macdonald
+#
+
+ifdef CONFIG_MMU
+UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
+else
+UTS_SYSNAME = -DUTS_SYSNAME=\"uClinux\"
+endif
+
+KBUILD_AFLAGS_MODULE += -G0 -mlong-calls
+KBUILD_CFLAGS_MODULE += -G0 -mlong-calls
+
+ifdef CONFIG_GPREL_DATA_8
+KBUILD_CFLAGS += -G8
+else
+ifdef CONFIG_GPREL_DATA_4
+KBUILD_CFLAGS += -G4
+else
+ifdef CONFIG_GPREL_DATA_NONE
+KBUILD_CFLAGS += -G0
+endif
+endif
+endif
+
+#LDFLAGS_vmlinux := -Map linkmap.txt
+
+ifdef CONFIG_GC_SECTIONS
+KBUILD_CFLAGS += -ffunction-sections -fdata-sections
+endif
+
+ifndef CONFIG_FRAME_POINTER
+KBUILD_CFLAGS += -mno-linked-fp
+endif
+
+ifdef CONFIG_CPU_FR451_COMPILE
+KBUILD_CFLAGS += -mcpu=fr450
+KBUILD_AFLAGS += -mcpu=fr450
+else
+ifdef CONFIG_CPU_FR551_COMPILE
+KBUILD_CFLAGS += -mcpu=fr550
+KBUILD_AFLAGS += -mcpu=fr550
+else
+KBUILD_CFLAGS += -mcpu=fr400
+KBUILD_AFLAGS += -mcpu=fr400
+endif
+endif
+
+# pretend the kernel is going to run on an FR400 with no media-fp unit
+# - reserve CC3 for use with atomic ops
+# - all the extra registers are dealt with only at context switch time
+KBUILD_CFLAGS += -mno-fdpic -mgpr-32 -msoft-float -mno-media
+KBUILD_CFLAGS += -ffixed-fcc3 -ffixed-cc3 -ffixed-gr15 -ffixed-icc2
+KBUILD_AFLAGS += -mno-fdpic
+
+# make sure the .S files get compiled with debug info
+# and disable optimisations that are unhelpful whilst debugging
+ifdef CONFIG_DEBUG_INFO
+#KBUILD_CFLAGS += -O1
+KBUILD_AFLAGS += -Wa,--gdwarf2
+endif
+
+head-y := arch/frv/kernel/head.o arch/frv/kernel/init_task.o
+
+core-y += arch/frv/kernel/ arch/frv/mm/
+libs-y += arch/frv/lib/
+
+core-$(CONFIG_MB93090_MB00) += arch/frv/mb93090-mb00/
+
+all: Image
+
+Image: vmlinux
+ $(Q)$(MAKE) $(build)=arch/frv/boot $@
+
+archclean:
+ $(Q)$(MAKE) $(clean)=arch/frv/boot
diff --git a/arch/frv/boot/Makefile b/arch/frv/boot/Makefile
new file mode 100644
index 00000000000..6ae3254da01
--- /dev/null
+++ b/arch/frv/boot/Makefile
@@ -0,0 +1,74 @@
+#
+# arch/frv/boot/Makefile, derived from arch/arm/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995-2000 Russell King
+#
+
+targets := Image zImage bootpImage
+
+SYSTEM =$(LINUX)
+
+ZTEXTADDR = 0x02080000
+PARAMS_PHYS = 0x0207c000
+INITRD_PHYS = 0x02180000
+INITRD_VIRT = 0x02180000
+
+#
+# If you don't define ZRELADDR above,
+# then it defaults to ZTEXTADDR
+#
+ifeq ($(ZRELADDR),)
+ZRELADDR = $(ZTEXTADDR)
+endif
+
+export SYSTEM ZTEXTADDR ZBSSADDR ZRELADDR INITRD_PHYS INITRD_VIRT PARAMS_PHYS
+
+Image: $(obj)/Image
+
+targets: $(obj)/Image
+
+$(obj)/Image: vmlinux FORCE
+ $(OBJCOPY) -O binary -R .note -R .comment -S vmlinux $@
+
+#$(obj)/Image: $(CONFIGURE) $(SYSTEM)
+# $(OBJCOPY) -O binary -R .note -R .comment -g -S $(SYSTEM) $@
+
+bzImage: zImage
+
+zImage: $(CONFIGURE) compressed/$(LINUX)
+ $(OBJCOPY) -O binary -R .note -R .comment -S compressed/$(LINUX) $@
+
+bootpImage: bootp/bootp
+ $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@
+
+compressed/$(LINUX): $(LINUX) dep
+ @$(MAKE) -C compressed $(LINUX)
+
+bootp/bootp: zImage initrd
+ @$(MAKE) -C bootp bootp
+
+initrd:
+ @test "$(INITRD_VIRT)" != "" || (echo This architecture does not support INITRD; exit -1)
+ @test "$(INITRD)" != "" || (echo You must specify INITRD; exit -1)
+
+#
+# installation
+#
+install: $(CONFIGURE) Image
+ sh ./install.sh $(KERNELRELEASE) Image System.map "$(INSTALL_PATH)"
+
+zinstall: $(CONFIGURE) zImage
+ sh ./install.sh $(KERNELRELEASE) zImage System.map "$(INSTALL_PATH)"
+
+#
+# miscellany
+#
+mrproper clean:
+# @$(MAKE) -C compressed clean
+# @$(MAKE) -C bootp clean
+
+dep:
diff --git a/arch/frv/defconfig b/arch/frv/defconfig
new file mode 100644
index 00000000000..b1b792610fd
--- /dev/null
+++ b/arch/frv/defconfig
@@ -0,0 +1,39 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EXPERT=y
+# CONFIG_HOTPLUG is not set
+CONFIG_MMU=y
+CONFIG_FRV_OUTOFLINE_ATOMIC_OPS=y
+CONFIG_FRV_DEFL_CACHE_WTHRU=y
+CONFIG_GPREL_DATA_4=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_NET_PCI=y
+CONFIG_NE2K_PCI=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
new file mode 100644
index 00000000000..5be6663cfee
--- /dev/null
+++ b/arch/frv/include/asm/Kbuild
@@ -0,0 +1,4 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += registers.h
+header-y += termios.h
diff --git a/arch/frv/include/asm/asm-offsets.h b/arch/frv/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/frv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
new file mode 100644
index 00000000000..0d8a7d66174
--- /dev/null
+++ b/arch/frv/include/asm/atomic.h
@@ -0,0 +1,260 @@
+/* atomic.h: atomic operation emulation for FR-V
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ * Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <linux/types.h>
+#include <asm/spr-regs.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#error not SMP safe
+#endif
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ *
+ * We do not have SMP systems, so we don't have to deal with that.
+ */
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#define ATOMIC_INIT(i) { (i) }
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long val;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " add%I2 %1,%2,%1 \n"
+ " cst.p %1,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ : "+U"(v->counter), "=&r"(val)
+ : "NPr"(i)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return val;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long val;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " sub%I2 %1,%2,%1 \n"
+ " cst.p %1,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ : "+U"(v->counter), "=&r"(val)
+ : "NPr"(i)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return val;
+}
+
+#else
+
+extern int atomic_add_return(int i, atomic_t *v);
+extern int atomic_sub_return(int i, atomic_t *v);
+
+#endif
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
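+
+/*
+ * Illustrative sketch only (hypothetical names, not part of this API): a
+ * simple reference count built on the primitives above.  example_put()
+ * returns true when the last reference is dropped.
+ *
+ *	static atomic_t example_refs = ATOMIC_INIT(1);
+ *
+ *	static void example_get(void)
+ *	{
+ *		atomic_inc(&example_refs);
+ *	}
+ *
+ *	static int example_put(void)
+ *	{
+ *		return atomic_dec_and_test(&example_refs);
+ *	}
+ */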
+
+/*
+ * 64-bit atomic ops
+ */
+typedef struct {
+ volatile long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline long long atomic64_read(atomic64_t *v)
+{
+ long long counter;
+
+ asm("ldd%I1 %M1,%0"
+ : "=e"(counter)
+ : "m"(v->counter));
+ return counter;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ asm volatile("std%I0 %1,%M0"
+ : "=m"(v->counter)
+ : "e"(i));
+}
+
+extern long long atomic64_inc_return(atomic64_t *v);
+extern long long atomic64_dec_return(atomic64_t *v);
+extern long long atomic64_add_return(long long i, atomic64_t *v);
+extern long long atomic64_sub_return(long long i, atomic64_t *v);
+
+static inline long long atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+ atomic64_add_return(i, v);
+}
+
+static inline void atomic64_sub(long long i, atomic64_t *v)
+{
+ atomic64_sub_return(i, v);
+}
+
+static inline void atomic64_inc(atomic64_t *v)
+{
+ atomic64_inc_return(v);
+}
+
+static inline void atomic64_dec(atomic64_t *v)
+{
+ atomic64_dec_return(v);
+}
+
+#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
+
+/*****************************************************************************/
+/*
+ * exchange value with memory
+ */
+extern uint64_t __xchg_64(uint64_t i, volatile void *v);
+
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig; \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: \
+ asm volatile( \
+ "swap%I0 %M0,%1" \
+ : "+m"(*__xg_ptr), "=r"(__xg_orig) \
+ : "1"(x) \
+ : "memory" \
+ ); \
+ break; \
+ \
+ default: \
+ __xg_orig = (__typeof__(__xg_orig))0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ \
+ __xg_orig; \
+})
+
+#else
+
+extern uint32_t __xchg_32(uint32_t i, volatile void *v);
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig; \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
+ default: \
+ __xg_orig = (__typeof__(__xg_orig))0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ __xg_orig; \
+})
+
+#endif
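+
+/*
+ * Note that only 32-bit quantities can be exchanged with xchg() here; any
+ * other size falls into the default case, which deliberately executes a
+ * "break" instruction to trap the mistake.  Illustrative use (hypothetical
+ * variable name):
+ *
+ *	static unsigned long example_token;
+ *	unsigned long old = xchg(&example_token, 1UL);
+ */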
+
+#define tas(ptr) (xchg((ptr), 1))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
+#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
+#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
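+/*
+ * Atomically add @a to @v, so long as @v was not already @u; returns the
+ * old value of @v.  This is the usual cmpxchg retry loop: if another
+ * update slips in between the read and the cmpxchg, the new value is
+ * reread and the attempt is repeated.
+ */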
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c;
+}
+
+
+#endif /* _ASM_ATOMIC_H */
diff --git a/arch/frv/include/asm/auxvec.h b/arch/frv/include/asm/auxvec.h
new file mode 100644
index 00000000000..07710778fa1
--- /dev/null
+++ b/arch/frv/include/asm/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __FRV_AUXVEC_H
+#define __FRV_AUXVEC_H
+
+#endif
diff --git a/arch/frv/include/asm/ax88796.h b/arch/frv/include/asm/ax88796.h
new file mode 100644
index 00000000000..637e980393c
--- /dev/null
+++ b/arch/frv/include/asm/ax88796.h
@@ -0,0 +1,22 @@
+/* ax88796.h: access points to the driver for the AX88796 NE2000 clone
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_AX88796_H
+#define _ASM_AX88796_H
+
+#include <asm/mb-regs.h>
+
+#define AX88796_IOADDR (__region_CS1 + 0x200)
+#define AX88796_IRQ IRQ_CPU_EXTERNAL7
+#define AX88796_FULL_DUPLEX 0 /* force full duplex */
+#define AX88796_BUS_INFO "CS1#+0x200" /* bus info for ethtool */
+
+#endif /* _ASM_AX88796_H */
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
new file mode 100644
index 00000000000..57bf85db893
--- /dev/null
+++ b/arch/frv/include/asm/bitops.h
@@ -0,0 +1,410 @@
+/* bitops.h: bit operations for the Fujitsu FR-V CPUs
+ *
+ * For an explanation of how atomic ops work in this arch, see:
+ * Documentation/frv/atomic-ops.txt
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/ffz.h>
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline
+unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " and%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(~mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+static inline
+unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " or%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+static inline
+unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
+{
+ unsigned long old, tmp;
+
+ asm volatile(
+ "0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " xor%I3 %1,%3,%2 \n"
+ " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
+ " beq icc3,#0,0b \n"
+ : "+U"(*v), "=&r"(old), "=r"(tmp)
+ : "NPr"(mask)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ return old;
+}
+
+#else
+
+extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
+extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
+extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
+
+#endif
+
+#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
+#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
+
+static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *ptr = addr;
+ unsigned long mask = 1UL << (nr & 31);
+ ptr += nr >> 5;
+ return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *ptr = addr;
+ unsigned long mask = 1UL << (nr & 31);
+ ptr += nr >> 5;
+ return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+}
+
+static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *ptr = addr;
+ unsigned long mask = 1UL << (nr & 31);
+ ptr += nr >> 5;
+ return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+}
+
+static inline void clear_bit(unsigned long nr, volatile void *addr)
+{
+ test_and_clear_bit(nr, addr);
+}
+
+static inline void set_bit(unsigned long nr, volatile void *addr)
+{
+ test_and_set_bit(nr, addr);
+}
+
+static inline void change_bit(unsigned long nr, volatile void *addr)
+{
+ test_and_change_bit(nr, addr);
+}
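+
+/*
+ * Illustrative use of the atomic forms above (hypothetical bitmap and
+ * handler names):
+ *
+ *	static unsigned long example_flags;
+ *
+ *	set_bit(3, &example_flags);
+ *	if (test_and_clear_bit(3, &example_flags))
+ *		example_handle();
+ */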
+
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ *a &= ~mask;
+}
+
+static inline void __set_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ *a |= mask;
+}
+
+static inline void __change_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ *a ^= mask;
+}
+
+static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask, retval;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ return retval;
+}
+
+static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask, retval;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ return retval;
+}
+
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
+{
+ volatile unsigned long *a = addr;
+ int mask, retval;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 31);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ return retval;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned long nr, const volatile void *addr)
+{
+ return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static inline int __test_bit(unsigned long nr, const volatile void *addr)
+{
+ int * a = (int *) addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *a) != 0);
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)))
+
+#include <asm-generic/bitops/find.h>
+
+/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs:
+ * - return 32..1 to indicate bit 31..0 most significant bit set
+ * - return 0 to indicate no bits set
+ */
+#define fls(x) \
+({ \
+ int bit; \
+ \
+ asm(" subcc %1,gr0,gr0,icc0 \n" \
+ " ckne icc0,cc4 \n" \
+ " cscan.p %1,gr0,%0 ,cc4,#1 \n" \
+ " csub %0,%0,%0 ,cc4,#0 \n" \
+ " csub %2,%0,%0 ,cc4,#1 \n" \
+ : "=&r"(bit) \
+ : "r"(x), "r"(32) \
+ : "icc0", "cc4" \
+ ); \
+ \
+ bit; \
+})
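+
+/*
+ * For example: fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32.
+ */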
+
+/**
+ * fls64 - find last bit set in a 64-bit value
+ * @n: the value to search
+ *
+ * This is defined the same way as ffs:
+ * - return 64..1 to indicate bit 63..0 most significant bit set
+ * - return 0 to indicate no bits set
+ */
+static inline __attribute__((const))
+int fls64(u64 n)
+{
+ union {
+ u64 ll;
+ struct { u32 h, l; };
+ } _;
+ int bit, x, y;
+
+ _.ll = n;
+
+ asm(" subcc.p %3,gr0,gr0,icc0 \n"
+ " subcc %4,gr0,gr0,icc1 \n"
+ " ckne icc0,cc4 \n"
+ " ckne icc1,cc5 \n"
+ " norcr cc4,cc5,cc6 \n"
+ " csub.p %0,%0,%0 ,cc6,1 \n"
+ " orcr cc5,cc4,cc4 \n"
+ " andcr cc4,cc5,cc4 \n"
+ " cscan.p %3,gr0,%0 ,cc4,0 \n"
+ " setlos #64,%1 \n"
+ " cscan.p %4,gr0,%0 ,cc4,1 \n"
+ " setlos #32,%2 \n"
+ " csub.p %1,%0,%0 ,cc4,0 \n"
+ " csub %2,%0,%0 ,cc4,1 \n"
+ : "=&r"(bit), "=r"(x), "=r"(y)
+ : "0r"(_.h), "r"(_.l)
+ : "icc0", "icc1", "cc4", "cc5", "cc6"
+ );
+ return bit;
+
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * - return 32..1 to indicate bit 31..0 least significant bit set
+ * - return 0 to indicate no bits set
+ */
+static inline __attribute__((const))
+int ffs(int x)
+{
+ /* Note: (x & -x) gives us a mask that is the least significant
+ * (rightmost) 1-bit of the value in x.
+ */
+ return fls(x & -x);
+}
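+
+/*
+ * For example: ffs(0) == 0 and ffs(0x10) == 5, since (0x10 & -0x10) == 0x10
+ * and bit 4 is the least significant bit set.
+ */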
+
+/**
+ * __ffs - find first bit set
+ * @x: the word to search
+ *
+ * - return 31..0 to indicate bit 31..0 least significant bit set
+ * - if no bits are set in x, the result is undefined
+ */
+static inline __attribute__((const))
+int __ffs(unsigned long x)
+{
+ int bit;
+ asm("scan %1,gr0,%0" : "=r"(bit) : "r"(x & -x));
+ return 31 - bit;
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ unsigned long bit;
+ asm("scan %1,gr0,%0" : "=r"(bit) : "r"(word));
+ return bit;
+}
+
+/*
+ * special slimline version of fls() for calculating ilog2_u32()
+ * - note: no protection against n == 0
+ */
+#define ARCH_HAS_ILOG2_U32
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ int bit;
+ asm("scan %1,gr0,%0" : "=r"(bit) : "r"(n));
+ return 31 - bit;
+}
+
+/*
+ * special slimline version of fls64() for calculating ilog2_u64()
+ * - note: no protection against n == 0
+ */
+#define ARCH_HAS_ILOG2_U64
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ union {
+ u64 ll;
+ struct { u32 h, l; };
+ } _;
+ int bit, x, y;
+
+ _.ll = n;
+
+ asm(" subcc %3,gr0,gr0,icc0 \n"
+ " ckeq icc0,cc4 \n"
+ " cscan.p %3,gr0,%0 ,cc4,0 \n"
+ " setlos #63,%1 \n"
+ " cscan.p %4,gr0,%0 ,cc4,1 \n"
+ " setlos #31,%2 \n"
+ " csub.p %1,%0,%0 ,cc4,0 \n"
+ " csub %2,%0,%0 ,cc4,1 \n"
+ : "=&r"(bit), "=r"(x), "=r"(y)
+ : "0r"(_.h), "r"(_.l)
+ : "icc0", "cc4"
+ );
+ return bit;
+}
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_BITOPS_H */
diff --git a/arch/frv/include/asm/bitsperlong.h b/arch/frv/include/asm/bitsperlong.h
new file mode 100644
index 00000000000..6dc0bb0c13b
--- /dev/null
+++ b/arch/frv/include/asm/bitsperlong.h
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/arch/frv/include/asm/bug.h b/arch/frv/include/asm/bug.h
new file mode 100644
index 00000000000..2e054508a2f
--- /dev/null
+++ b/arch/frv/include/asm/bug.h
@@ -0,0 +1,54 @@
+/* bug.h: FRV bug trapping
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_BUG_H
+#define _ASM_BUG_H
+
+#include <linux/linkage.h>
+
+#ifdef CONFIG_BUG
+/*
+ * Tell the user there is some problem.
+ */
+extern asmlinkage void __debug_bug_trap(int signr);
+
+#ifdef CONFIG_NO_KERNEL_MSG
+#define _debug_bug_printk()
+#else
+extern void __debug_bug_printk(const char *file, unsigned line);
+#define _debug_bug_printk() __debug_bug_printk(__FILE__, __LINE__)
+#endif
+
+#define _debug_bug_trap(signr) \
+do { \
+ __debug_bug_trap(signr); \
+ asm volatile("nop"); \
+} while(1)
+
+#define HAVE_ARCH_BUG
+#define BUG() \
+do { \
+ _debug_bug_printk(); \
+ _debug_bug_trap(6 /*SIGABRT*/); \
+} while (0)
+
+#ifdef CONFIG_GDBSTUB
+#define HAVE_ARCH_KGDB_RAISE
+#define kgdb_raise(signr) do { _debug_bug_trap(signr); } while(0)
+
+#define HAVE_ARCH_KGDB_BAD_PAGE
+#define kgdb_bad_page(page) do { kgdb_raise(SIGABRT); } while(0)
+#endif
+
+#endif /* CONFIG_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/frv/include/asm/bugs.h b/arch/frv/include/asm/bugs.h
new file mode 100644
index 00000000000..f2382be2b46
--- /dev/null
+++ b/arch/frv/include/asm/bugs.h
@@ -0,0 +1,14 @@
+/* bugs.h: arch bug checking entry
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+static inline void check_bugs(void)
+{
+}
diff --git a/arch/frv/include/asm/busctl-regs.h b/arch/frv/include/asm/busctl-regs.h
new file mode 100644
index 00000000000..bb0ff4816e2
--- /dev/null
+++ b/arch/frv/include/asm/busctl-regs.h
@@ -0,0 +1,41 @@
+/* busctl-regs.h: FR400-series CPU bus controller registers
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_BUSCTL_REGS_H
+#define _ASM_BUSCTL_REGS_H
+
+/* bus controller registers */
+#define __get_LGCR() ({ *(volatile unsigned long *)(0xfe000010); })
+#define __get_LMAICR() ({ *(volatile unsigned long *)(0xfe000030); })
+#define __get_LEMBR() ({ *(volatile unsigned long *)(0xfe000040); })
+#define __get_LEMAM() ({ *(volatile unsigned long *)(0xfe000048); })
+#define __get_LCR(R) ({ *(volatile unsigned long *)(0xfe000100 + 8*(R)); })
+#define __get_LSBR(R) ({ *(volatile unsigned long *)(0xfe000c00 + 8*(R)); })
+#define __get_LSAM(R) ({ *(volatile unsigned long *)(0xfe000d00 + 8*(R)); })
+
+#define __set_LGCR(V) do { *(volatile unsigned long *)(0xfe000010) = (V); } while(0)
+#define __set_LMAICR(V) do { *(volatile unsigned long *)(0xfe000030) = (V); } while(0)
+#define __set_LEMBR(V) do { *(volatile unsigned long *)(0xfe000040) = (V); } while(0)
+#define __set_LEMAM(V) do { *(volatile unsigned long *)(0xfe000048) = (V); } while(0)
+#define __set_LCR(R,V) do { *(volatile unsigned long *)(0xfe000100 + 8*(R)) = (V); } while(0)
+#define __set_LSBR(R,V) do { *(volatile unsigned long *)(0xfe000c00 + 8*(R)) = (V); } while(0)
+#define __set_LSAM(R,V) do { *(volatile unsigned long *)(0xfe000d00 + 8*(R)) = (V); } while(0)
+
+/* FR401 SDRAM controller registers */
+#define __get_DBR(R) ({ *(volatile unsigned long *)(0xfe000e00 + 8*(R)); })
+#define __get_DAM(R) ({ *(volatile unsigned long *)(0xfe000f00 + 8*(R)); })
+
+/* FR551 SDRAM controller registers */
+#define __get_DARS(R) ({ *(volatile unsigned long *)(0xfeff0100 + 8*(R)); })
+#define __get_DAMK(R) ({ *(volatile unsigned long *)(0xfeff0110 + 8*(R)); })
+
+
+#endif /* _ASM_BUSCTL_REGS_H */
diff --git a/arch/frv/include/asm/byteorder.h b/arch/frv/include/asm/byteorder.h
new file mode 100644
index 00000000000..f29b7593e08
--- /dev/null
+++ b/arch/frv/include/asm/byteorder.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_BYTEORDER_H
+#define _ASM_BYTEORDER_H
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _ASM_BYTEORDER_H */
diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
new file mode 100644
index 00000000000..2797163b8f4
--- /dev/null
+++ b/arch/frv/include/asm/cache.h
@@ -0,0 +1,23 @@
+/* cache.h: FRV cache definitions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_CACHE_H
+#define __ASM_CACHE_H
+
+
+/* bytes per L1 cache line */
+#define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+
+#endif
diff --git a/arch/frv/include/asm/cacheflush.h b/arch/frv/include/asm/cacheflush.h
new file mode 100644
index 00000000000..edbac54ae01
--- /dev/null
+++ b/arch/frv/include/asm/cacheflush.h
@@ -0,0 +1,105 @@
+/* cacheflush.h: FRV cache flushing routines
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all() do {} while(0)
+#define flush_cache_mm(mm) do {} while(0)
+#define flush_cache_dup_mm(mm) do {} while(0)
+#define flush_cache_range(mm, start, end) do {} while(0)
+#define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
+#define flush_cache_vmap(start, end) do {} while(0)
+#define flush_cache_vunmap(start, end) do {} while(0)
+#define flush_dcache_mmap_lock(mapping) do {} while(0)
+#define flush_dcache_mmap_unlock(mapping) do {} while(0)
+
+/*
+ * physically-indexed cache management
+ * - see arch/frv/lib/cache.S
+ */
+extern void frv_dcache_writeback(unsigned long start, unsigned long size);
+extern void frv_cache_invalidate(unsigned long start, unsigned long size);
+extern void frv_icache_invalidate(unsigned long start, unsigned long size);
+extern void frv_cache_wback_inv(unsigned long start, unsigned long size);
+
+static inline void __flush_cache_all(void)
+{
+ asm volatile(" dcef @(gr0,gr0),#1 \n"
+ " icei @(gr0,gr0),#1 \n"
+ " membar \n"
+ : : : "memory"
+ );
+}
+
+/* dcache/icache coherency... */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+#ifdef CONFIG_MMU
+extern void flush_dcache_page(struct page *page);
+#else
+static inline void flush_dcache_page(struct page *page)
+{
+ unsigned long addr = page_to_phys(page);
+ frv_dcache_writeback(addr, addr + PAGE_SIZE);
+}
+#endif
+
+static inline void flush_page_to_ram(struct page *page)
+{
+ flush_dcache_page(page);
+}
+
+static inline void flush_icache(void)
+{
+ __flush_cache_all();
+}
+
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+ frv_cache_wback_inv(start, end);
+}
+
+#ifdef CONFIG_MMU
+extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long start, unsigned long len);
+#else
+static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long start, unsigned long len)
+{
+ frv_cache_wback_inv(start, start + len);
+}
+#endif
+
+static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE);
+}
+
+/*
+ * permit ptrace to access another process's address space through the icache
+ * and the dcache
+ */
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+ flush_icache_user_range((vma), (page), (vaddr), (len)); \
+} while(0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy((dst), (src), (len))
+
+#endif /* _ASM_CACHEFLUSH_H */
diff --git a/arch/frv/include/asm/checksum.h b/arch/frv/include/asm/checksum.h
new file mode 100644
index 00000000000..269da09ff63
--- /dev/null
+++ b/arch/frv/include/asm/checksum.h
@@ -0,0 +1,180 @@
+/* checksum.h: FRV checksumming
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_CHECKSUM_H
+#define _ASM_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which are always checksummed on 4-octet boundaries.
+ *
+ */
+static inline
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int tmp, inc, sum = 0;
+
+ asm(" addcc gr0,gr0,gr0,icc0\n" /* clear icc0.C */
+ " subi %1,#4,%1 \n"
+ "0: \n"
+ " ldu.p @(%1,%3),%4 \n"
+ " subicc %2,#1,%2,icc1 \n"
+ " addxcc.p %4,%0,%0,icc0 \n"
+ " bhi icc1,#2,0b \n"
+
+ /* fold the 33-bit result into 16-bits */
+ " addxcc gr0,%0,%0,icc0 \n"
+ " srli %0,#16,%1 \n"
+ " sethi #0,%0 \n"
+ " add %1,%0,%0 \n"
+ " srli %0,#16,%1 \n"
+ " add %1,%0,%0 \n"
+
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (inc), "=&r"(tmp)
+ : "0" (sum), "1" (iph), "2" (ihl), "3" (4),
+ "m"(*(volatile struct { int _[100]; } *)iph)
+ : "icc0", "icc1", "memory"
+ );
+
+ return (__force __sum16)~sum;
+}
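+
+/*
+ * Typical use (illustrative): ip_fast_csum(iph, iph->ihl), where iph points
+ * to a struct iphdr and ihl is the header length in 32-bit words.
+ */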
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ unsigned int tmp;
+
+ asm(" srli %0,#16,%1 \n"
+ " sethi #0,%0 \n"
+ " add %1,%0,%0 \n"
+ " srli %0,#16,%1 \n"
+ " add %1,%0,%0 \n"
+ : "=r"(sum), "=&r"(tmp)
+ : "0"(sum)
+ );
+
+ return (__force __sum16)~sum;
+}
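+
+/*
+ * Worked example of the fold above: for sum = 0x0001ffff the first pass
+ * gives 0x0001 + 0xffff = 0x10000, the second 0x0001 + 0x10000 = 0x10001;
+ * the complement is then truncated to 16 bits, returning 0xfffe.
+ */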
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ asm(" addcc %1,%0,%0,icc0 \n"
+ " addxcc %2,%0,%0,icc0 \n"
+ " addxcc %3,%0,%0,icc0 \n"
+ " addxcc gr0,%0,%0,icc0 \n"
+ : "=r" (sum)
+ : "r" (daddr), "r" (saddr), "r" (len + proto), "0"(sum)
+ : "icc0"
+ );
+ return sum;
+}
+
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#define _HAVE_ARCH_IPV6_CSUM
+static inline __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, unsigned short proto, __wsum sum)
+{
+ unsigned long tmp, tmp2;
+
+ asm(" addcc %2,%0,%0,icc0 \n"
+
+ /* add up the source addr */
+ " ldi @(%3,0),%1 \n"
+ " addxcc %1,%0,%0,icc0 \n"
+ " ldi @(%3,4),%2 \n"
+ " addxcc %2,%0,%0,icc0 \n"
+ " ldi @(%3,8),%1 \n"
+ " addxcc %1,%0,%0,icc0 \n"
+ " ldi @(%3,12),%2 \n"
+ " addxcc %2,%0,%0,icc0 \n"
+
+ /* add up the dest addr */
+ " ldi @(%4,0),%1 \n"
+ " addxcc %1,%0,%0,icc0 \n"
+ " ldi @(%4,4),%2 \n"
+ " addxcc %2,%0,%0,icc0 \n"
+ " ldi @(%4,8),%1 \n"
+ " addxcc %1,%0,%0,icc0 \n"
+ " ldi @(%4,12),%2 \n"
+ " addxcc %2,%0,%0,icc0 \n"
+
+ /* fold the 33-bit result into 16-bits */
+ " addxcc gr0,%0,%0,icc0 \n"
+ " srli %0,#16,%1 \n"
+ " sethi #0,%0 \n"
+ " add %1,%0,%0 \n"
+ " srli %0,#16,%1 \n"
+ " add %1,%0,%0 \n"
+
+ : "=r" (sum), "=&r" (tmp), "=r" (tmp2)
+ : "r" (saddr), "r" (daddr), "0" (sum), "2" (len + proto)
+ : "icc0"
+ );
+
+ return (__force __sum16)~sum;
+}
+
+#endif /* _ASM_CHECKSUM_H */
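
For reference, the fold that csum_fold() performs in assembly above is equivalent to the following portable C sketch (csum_fold_sketch is an illustrative name, not part of this header):

static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	/* add the top halfword into the bottom one; a carry may remain */
	sum = (sum & 0xffff) + (sum >> 16);
	/* add any remaining carry back in, then take the one's complement */
	sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}
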
diff --git a/arch/frv/include/asm/cpu-irqs.h b/arch/frv/include/asm/cpu-irqs.h
new file mode 100644
index 00000000000..478f3498fcf
--- /dev/null
+++ b/arch/frv/include/asm/cpu-irqs.h
@@ -0,0 +1,81 @@
+/* cpu-irqs.h: on-CPU peripheral irqs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_CPU_IRQS_H
+#define _ASM_CPU_IRQS_H
+
+#ifndef __ASSEMBLY__
+
+/* IRQ to level mappings */
+#define IRQ_GDBSTUB_LEVEL 15
+#define IRQ_UART_LEVEL 13
+
+#ifdef CONFIG_GDBSTUB_UART0
+#define IRQ_UART0_LEVEL IRQ_GDBSTUB_LEVEL
+#else
+#define IRQ_UART0_LEVEL IRQ_UART_LEVEL
+#endif
+
+#ifdef CONFIG_GDBSTUB_UART1
+#define IRQ_UART1_LEVEL IRQ_GDBSTUB_LEVEL
+#else
+#define IRQ_UART1_LEVEL IRQ_UART_LEVEL
+#endif
+
+#define IRQ_DMA0_LEVEL 14
+#define IRQ_DMA1_LEVEL 14
+#define IRQ_DMA2_LEVEL 14
+#define IRQ_DMA3_LEVEL 14
+#define IRQ_DMA4_LEVEL 14
+#define IRQ_DMA5_LEVEL 14
+#define IRQ_DMA6_LEVEL 14
+#define IRQ_DMA7_LEVEL 14
+
+#define IRQ_TIMER0_LEVEL 12
+#define IRQ_TIMER1_LEVEL 11
+#define IRQ_TIMER2_LEVEL 10
+
+#define IRQ_XIRQ0_LEVEL 1
+#define IRQ_XIRQ1_LEVEL 2
+#define IRQ_XIRQ2_LEVEL 3
+#define IRQ_XIRQ3_LEVEL 4
+#define IRQ_XIRQ4_LEVEL 5
+#define IRQ_XIRQ5_LEVEL 6
+#define IRQ_XIRQ6_LEVEL 7
+#define IRQ_XIRQ7_LEVEL 8
+
+/* IRQ IDs presented to drivers */
+#define IRQ_CPU__UNUSED IRQ_BASE_CPU
+#define IRQ_CPU_UART0 (IRQ_BASE_CPU + IRQ_UART0_LEVEL)
+#define IRQ_CPU_UART1 (IRQ_BASE_CPU + IRQ_UART1_LEVEL)
+#define IRQ_CPU_TIMER0 (IRQ_BASE_CPU + IRQ_TIMER0_LEVEL)
+#define IRQ_CPU_TIMER1 (IRQ_BASE_CPU + IRQ_TIMER1_LEVEL)
+#define IRQ_CPU_TIMER2 (IRQ_BASE_CPU + IRQ_TIMER2_LEVEL)
+#define IRQ_CPU_DMA0 (IRQ_BASE_CPU + IRQ_DMA0_LEVEL)
+#define IRQ_CPU_DMA1 (IRQ_BASE_CPU + IRQ_DMA1_LEVEL)
+#define IRQ_CPU_DMA2 (IRQ_BASE_CPU + IRQ_DMA2_LEVEL)
+#define IRQ_CPU_DMA3 (IRQ_BASE_CPU + IRQ_DMA3_LEVEL)
+#define IRQ_CPU_DMA4 (IRQ_BASE_CPU + IRQ_DMA4_LEVEL)
+#define IRQ_CPU_DMA5 (IRQ_BASE_CPU + IRQ_DMA5_LEVEL)
+#define IRQ_CPU_DMA6 (IRQ_BASE_CPU + IRQ_DMA6_LEVEL)
+#define IRQ_CPU_DMA7 (IRQ_BASE_CPU + IRQ_DMA7_LEVEL)
+#define IRQ_CPU_EXTERNAL0 (IRQ_BASE_CPU + IRQ_XIRQ0_LEVEL)
+#define IRQ_CPU_EXTERNAL1 (IRQ_BASE_CPU + IRQ_XIRQ1_LEVEL)
+#define IRQ_CPU_EXTERNAL2 (IRQ_BASE_CPU + IRQ_XIRQ2_LEVEL)
+#define IRQ_CPU_EXTERNAL3 (IRQ_BASE_CPU + IRQ_XIRQ3_LEVEL)
+#define IRQ_CPU_EXTERNAL4 (IRQ_BASE_CPU + IRQ_XIRQ4_LEVEL)
+#define IRQ_CPU_EXTERNAL5 (IRQ_BASE_CPU + IRQ_XIRQ5_LEVEL)
+#define IRQ_CPU_EXTERNAL6 (IRQ_BASE_CPU + IRQ_XIRQ6_LEVEL)
+#define IRQ_CPU_EXTERNAL7 (IRQ_BASE_CPU + IRQ_XIRQ7_LEVEL)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_CPU_IRQS_H */
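
A driver would claim one of these IRQ IDs through the generic kernel IRQ API; a minimal, hypothetical sketch (handler and device name are illustrative):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/cpu-irqs.h>

static irqreturn_t example_timer_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the timer interrupt here */
	return IRQ_HANDLED;
}

static int __init example_timer_setup(void)
{
	/* IRQ_CPU_TIMER1 comes from the table above */
	return request_irq(IRQ_CPU_TIMER1, example_timer_isr, 0,
			   "example-timer", NULL);
}
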
diff --git a/arch/frv/include/asm/cpumask.h b/arch/frv/include/asm/cpumask.h
new file mode 100644
index 00000000000..d999c20c84d
--- /dev/null
+++ b/arch/frv/include/asm/cpumask.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CPUMASK_H
+#define _ASM_CPUMASK_H
+
+#include <asm-generic/cpumask.h>
+
+#endif /* _ASM_CPUMASK_H */
diff --git a/arch/frv/include/asm/cputime.h b/arch/frv/include/asm/cputime.h
new file mode 100644
index 00000000000..f6c373ad2b8
--- /dev/null
+++ b/arch/frv/include/asm/cputime.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CPUTIME_H
+#define _ASM_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* _ASM_CPUTIME_H */
diff --git a/arch/frv/include/asm/current.h b/arch/frv/include/asm/current.h
new file mode 100644
index 00000000000..86b027491b0
--- /dev/null
+++ b/arch/frv/include/asm/current.h
@@ -0,0 +1,30 @@
+/* current.h: FRV current task pointer
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_CURRENT_H
+#define _ASM_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * dedicate GR29 to keeping the current task pointer
+ */
+register struct task_struct *current asm("gr29");
+
+#define get_current() current
+
+#else
+
+#define CURRENT gr29
+
+#endif
+
+#endif /* _ASM_CURRENT_H */
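
Because the task pointer lives permanently in GR29, code such as the following hypothetical debugging helper dereferences it without any memory load for `current` itself:

#include <linux/kernel.h>
#include <linux/sched.h>

static void show_current_task(void)
{
	/* 'current' expands to the GR29 register variable declared above */
	printk(KERN_DEBUG "running task: %s (pid %d)\n",
	       current->comm, current->pid);
}
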
diff --git a/arch/frv/include/asm/delay.h b/arch/frv/include/asm/delay.h
new file mode 100644
index 00000000000..597b4ebf03b
--- /dev/null
+++ b/arch/frv/include/asm/delay.h
@@ -0,0 +1,50 @@
+/* delay.h: FRV delay code
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_DELAY_H
+#define _ASM_DELAY_H
+
+#include <asm/param.h>
+#include <asm/timer-regs.h>
+
+/*
+ * delay loop - runs at __core_clock_speed_HZ / 2 [there are 2 insns in the loop]
+ */
+extern unsigned long __delay_loops_MHz;
+
+static inline void __delay(unsigned long loops)
+{
+ asm volatile("1: subicc %0,#1,%0,icc0 \n"
+ " bnc icc0,#2,1b \n"
+ : "=r" (loops)
+ : "0" (loops)
+ : "icc0"
+ );
+}
+
+/*
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+
+extern unsigned long loops_per_jiffy;
+
+static inline void udelay(unsigned long usecs)
+{
+ __delay(usecs * __delay_loops_MHz);
+}
+
+#define ndelay(n) udelay((n) * 5)
+
+#endif /* _ASM_DELAY_H */
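
As a worked example (assuming a hypothetical 400 MHz core clock): the loop body is two instructions, so __delay_loops_MHz would be about 200, and udelay(10) spins roughly 10 * 200 = 2000 iterations of __delay().
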
diff --git a/arch/frv/include/asm/device.h b/arch/frv/include/asm/device.h
new file mode 100644
index 00000000000..d8f9872b0e2
--- /dev/null
+++ b/arch/frv/include/asm/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
diff --git a/arch/frv/include/asm/div64.h b/arch/frv/include/asm/div64.h
new file mode 100644
index 00000000000..6cd978cefb2
--- /dev/null
+++ b/arch/frv/include/asm/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/arch/frv/include/asm/dm9000.h b/arch/frv/include/asm/dm9000.h
new file mode 100644
index 00000000000..f6f48fd9ec6
--- /dev/null
+++ b/arch/frv/include/asm/dm9000.h
@@ -0,0 +1,37 @@
+/* dm9000.h: Davicom DM9000 adapter configuration
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_DM9000_H
+#define _ASM_DM9000_H
+
+#include <asm/mb-regs.h>
+
+#define DM9000_ARCH_IOBASE (__region_CS6 + 0x300)
+#define DM9000_ARCH_IRQ IRQ_CPU_EXTERNAL3 /* XIRQ #3 (shared with FPGA) */
+#undef DM9000_ARCH_IRQ_ACTLOW /* IRQ pin active high */
+#define DM9000_ARCH_BUS_INFO "CS6#+0x300" /* bus info for ethtool */
+
+#undef __is_PCI_IO
+#define __is_PCI_IO(addr) 0 /* not PCI */
+
+#undef inl
+#define inl(addr) \
+({ \
+ unsigned long __ioaddr = (unsigned long) addr; \
+ uint32_t x = readl(__ioaddr); \
+ ((x & 0xff) << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | ((x >> 24) & 0xff); \
+})
+
+#undef insl
+#define insl(a,b,l) __insl(a,b,l,0) /* don't byte-swap */
+
+
+#endif /* _ASM_DM9000_H */
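
The open-coded expression in the inl override above is a plain 32-bit byte swap; an equivalent sketch using the kernel's generic swab helper (dm9000_inl_sketch is an illustrative name) would be:

#include <linux/swab.h>
#include <asm/io.h>

static inline u32 dm9000_inl_sketch(unsigned long addr)
{
	/* read the register and reverse the byte order, as the macro does */
	return swab32(readl((void __iomem *)addr));
}
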
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..dfb811002c6
--- /dev/null
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -0,0 +1,135 @@
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
+
+#include <linux/device.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+/*
+ * See Documentation/DMA-API.txt for the description of how the
+ * following DMA API should work.
+ */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+extern unsigned long __nongprelbss dma_coherent_mem_start;
+extern unsigned long __nongprelbss dma_coherent_mem_end;
+
+void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
+void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction);
+
+static inline
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+
+static inline
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+extern
+dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction);
+
+static inline
+void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline
+void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static inline
+void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline
+void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static inline
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+}
+
+static inline
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static inline
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline
+int dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+
+ return 1;
+}
+
+static inline
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+#endif /* _ASM_DMA_MAPPING_H */
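
As a usage sketch of the streaming mappings declared above (hypothetical driver fragment, error handling trimmed):

#include <linux/dma-mapping.h>

static void example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... program the device with 'handle' and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}
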
diff --git a/arch/frv/include/asm/dma.h b/arch/frv/include/asm/dma.h
new file mode 100644
index 00000000000..683c47d48a5
--- /dev/null
+++ b/arch/frv/include/asm/dma.h
@@ -0,0 +1,125 @@
+/* dma.h: FRV DMA controller management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_DMA_H
+#define _ASM_DMA_H
+
+//#define DMA_DEBUG 1
+
+#include <linux/interrupt.h>
+
+#undef MAX_DMA_CHANNELS /* don't use kernel/dma.c */
+
+/* under 2.4 this is actually needed by the new bootmem allocator */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+/*
+ * FRV DMA controller management
+ */
+typedef irqreturn_t (*dma_irq_handler_t)(int dmachan, unsigned long cstr, void *data);
+
+extern void frv_dma_init(void);
+
+extern int frv_dma_open(const char *devname,
+ unsigned long dmamask,
+ int dmacap,
+ dma_irq_handler_t handler,
+ unsigned long irq_flags,
+ void *data);
+
+/* channels required */
+#define FRV_DMA_MASK_ANY ULONG_MAX /* any channel */
+
+/* capabilities required */
+#define FRV_DMA_CAP_DREQ 0x01 /* DMA request pin */
+#define FRV_DMA_CAP_DACK 0x02 /* DMA ACK pin */
+#define FRV_DMA_CAP_DONE 0x04 /* DMA done pin */
+
+extern void frv_dma_close(int dma);
+
+extern void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr);
+
+extern void frv_dma_start(int dma,
+ unsigned long sba, unsigned long dba,
+ unsigned long pix, unsigned long six, unsigned long bcl);
+
+extern void frv_dma_restart_circular(int dma, unsigned long six);
+
+extern void frv_dma_stop(int dma);
+
+extern int is_frv_dma_interrupting(int dma);
+
+extern void frv_dma_dump(int dma);
+
+extern void frv_dma_status_clear(int dma);
+
+#define FRV_DMA_NCHANS 8
+#define FRV_DMA_4CHANS 4
+#define FRV_DMA_8CHANS 8
+
+#define DMAC_CCFRx 0x00 /* channel configuration reg */
+#define DMAC_CCFRx_CM_SHIFT 16
+#define DMAC_CCFRx_CM_DA 0x00000000
+#define DMAC_CCFRx_CM_SCA 0x00010000
+#define DMAC_CCFRx_CM_DCA 0x00020000
+#define DMAC_CCFRx_CM_2D 0x00030000
+#define DMAC_CCFRx_ATS_SHIFT 8
+#define DMAC_CCFRx_RS_INTERN 0x00000000
+#define DMAC_CCFRx_RS_EXTERN 0x00000001
+#define DMAC_CCFRx_RS_SHIFT 0
+
+#define DMAC_CSTRx 0x08 /* channel status reg */
+#define DMAC_CSTRx_FS 0x0000003f
+#define DMAC_CSTRx_NE 0x00000100
+#define DMAC_CSTRx_FED 0x00000200
+#define DMAC_CSTRx_WER 0x00000800
+#define DMAC_CSTRx_RER 0x00001000
+#define DMAC_CSTRx_CE 0x00002000
+#define DMAC_CSTRx_INT 0x00800000
+#define DMAC_CSTRx_BUSY 0x80000000
+
+#define DMAC_CCTRx 0x10 /* channel control reg */
+#define DMAC_CCTRx_DSIZ_1 0x00000000
+#define DMAC_CCTRx_DSIZ_2 0x00000001
+#define DMAC_CCTRx_DSIZ_4 0x00000002
+#define DMAC_CCTRx_DSIZ_32 0x00000005
+#define DMAC_CCTRx_DAU_HOLD 0x00000000
+#define DMAC_CCTRx_DAU_INC 0x00000010
+#define DMAC_CCTRx_DAU_DEC 0x00000020
+#define DMAC_CCTRx_SSIZ_1 0x00000000
+#define DMAC_CCTRx_SSIZ_2 0x00000100
+#define DMAC_CCTRx_SSIZ_4 0x00000200
+#define DMAC_CCTRx_SSIZ_32 0x00000500
+#define DMAC_CCTRx_SAU_HOLD 0x00000000
+#define DMAC_CCTRx_SAU_INC 0x00001000
+#define DMAC_CCTRx_SAU_DEC 0x00002000
+#define DMAC_CCTRx_FC 0x08000000
+#define DMAC_CCTRx_ICE 0x10000000
+#define DMAC_CCTRx_IE 0x40000000
+#define DMAC_CCTRx_ACT 0x80000000
+
+#define DMAC_SBAx 0x18 /* source base address reg */
+#define DMAC_DBAx 0x20 /* data base address reg */
+#define DMAC_PIXx 0x28 /* primary index reg */
+#define DMAC_SIXx 0x30 /* secondary index reg */
+#define DMAC_BCLx 0x38 /* byte count limit reg */
+#define DMAC_APRx 0x40 /* alternate pointer reg */
+
+/*
+ * required for PCI + MODULES
+ */
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_DMA_H */
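
Claiming a channel with the interface above might look like this hypothetical sketch (the device name, handler and capability mix are illustrative):

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/dma.h>

static irqreturn_t example_dma_isr(int dmachan, unsigned long cstr, void *data)
{
	/* inspect 'cstr' (channel status) and acknowledge the transfer */
	return IRQ_HANDLED;
}

static int example_dma_claim(void)
{
	/* any channel will do, but it must have DREQ and DACK pins */
	return frv_dma_open("example-dev", FRV_DMA_MASK_ANY,
			    FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
			    example_dma_isr, 0, NULL);
}
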
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
new file mode 100644
index 00000000000..c3819804a74
--- /dev/null
+++ b/arch/frv/include/asm/elf.h
@@ -0,0 +1,142 @@
+/* elf.h: FR-V ELF definitions
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from include/asm-m68knommu/elf.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_ELF_H
+#define __ASM_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+
+struct elf32_hdr;
+
+/*
+ * ELF header e_flags defines.
+ */
+#define EF_FRV_GPR_MASK 0x00000003 /* mask for # of gprs */
+#define EF_FRV_GPR32 0x00000001 /* Uses only the first 32 GPRs */
+#define EF_FRV_GPR64 0x00000002 /* Uses all 64 GPRs */
+#define EF_FRV_FPR_MASK 0x0000000c /* mask for # of fprs */
+#define EF_FRV_FPR32 0x00000004 /* Uses only the first 32 FPRs */
+#define EF_FRV_FPR64 0x00000008 /* Uses all 64 FPRs */
+#define EF_FRV_FPR_NONE 0x0000000C /* Uses software floating-point */
+#define EF_FRV_DWORD_MASK 0x00000030 /* mask for dword support */
+#define EF_FRV_DWORD_YES 0x00000010 /* Assumes stack aligned to 8-byte boundaries. */
+#define EF_FRV_DWORD_NO 0x00000020 /* Assumes stack aligned to 4-byte boundaries. */
+#define EF_FRV_DOUBLE 0x00000040 /* Uses double instructions. */
+#define EF_FRV_MEDIA 0x00000080 /* Uses media instructions. */
+#define EF_FRV_PIC 0x00000100 /* Uses position independent code. */
+#define EF_FRV_NON_PIC_RELOCS 0x00000200 /* Does not use position-independent code. */
+#define EF_FRV_MULADD 0x00000400 /* -mmuladd */
+#define EF_FRV_BIGPIC 0x00000800 /* -fPIC */
+#define EF_FRV_LIBPIC 0x00001000 /* -mlibrary-pic */
+#define EF_FRV_G0 0x00002000 /* -G 0, no small data ptr */
+#define EF_FRV_NOPACK 0x00004000 /* -mnopack */
+#define EF_FRV_FDPIC 0x00008000 /* -mfdpic */
+#define EF_FRV_CPU_MASK 0xff000000 /* specific cpu bits */
+#define EF_FRV_CPU_GENERIC 0x00000000 /* Set CPU type is FR-V */
+#define EF_FRV_CPU_FR500 0x01000000 /* Set CPU type is FR500 */
+#define EF_FRV_CPU_FR300 0x02000000 /* Set CPU type is FR300 */
+#define EF_FRV_CPU_SIMPLE 0x03000000 /* SIMPLE */
+#define EF_FRV_CPU_TOMCAT 0x04000000 /* Tomcat, FR500 prototype */
+#define EF_FRV_CPU_FR400 0x05000000 /* Set CPU type is FR400 */
+#define EF_FRV_CPU_FR550 0x06000000 /* Set CPU type is FR550 */
+#define EF_FRV_CPU_FR405 0x07000000 /* Set CPU type is FR405 */
+#define EF_FRV_CPU_FR450 0x08000000 /* Set CPU type is FR450 */
+
+/*
+ * FR-V ELF relocation types
+ */
+
+
+/*
+ * ELF register definitions..
+ */
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_fpmedia_regs elf_fpregset_t;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+extern int elf_check_arch(const struct elf32_hdr *hdr);
+
+#define elf_check_fdpic(x) ((x)->e_flags & EF_FRV_FDPIC && !((x)->e_flags & EF_FRV_NON_PIC_RELOCS))
+#define elf_check_const_displacement(x) ((x)->e_flags & EF_FRV_PIC)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB
+#define ELF_ARCH EM_FRV
+
+#define ELF_PLAT_INIT(_r) \
+do { \
+ __kernel_frame0_ptr->gr16 = 0; \
+ __kernel_frame0_ptr->gr17 = 0; \
+ __kernel_frame0_ptr->gr18 = 0; \
+ __kernel_frame0_ptr->gr19 = 0; \
+ __kernel_frame0_ptr->gr20 = 0; \
+ __kernel_frame0_ptr->gr21 = 0; \
+ __kernel_frame0_ptr->gr22 = 0; \
+ __kernel_frame0_ptr->gr23 = 0; \
+ __kernel_frame0_ptr->gr24 = 0; \
+ __kernel_frame0_ptr->gr25 = 0; \
+ __kernel_frame0_ptr->gr26 = 0; \
+ __kernel_frame0_ptr->gr27 = 0; \
+ __kernel_frame0_ptr->gr29 = 0; \
+} while(0)
+
+#define ELF_FDPIC_PLAT_INIT(_regs, _exec_map_addr, _interp_map_addr, _dynamic_addr) \
+do { \
+ __kernel_frame0_ptr->gr16 = _exec_map_addr; \
+ __kernel_frame0_ptr->gr17 = _interp_map_addr; \
+ __kernel_frame0_ptr->gr18 = _dynamic_addr; \
+ __kernel_frame0_ptr->gr19 = 0; \
+ __kernel_frame0_ptr->gr20 = 0; \
+ __kernel_frame0_ptr->gr21 = 0; \
+ __kernel_frame0_ptr->gr22 = 0; \
+ __kernel_frame0_ptr->gr23 = 0; \
+ __kernel_frame0_ptr->gr24 = 0; \
+ __kernel_frame0_ptr->gr25 = 0; \
+ __kernel_frame0_ptr->gr26 = 0; \
+ __kernel_frame0_ptr->gr27 = 0; \
+ __kernel_frame0_ptr->gr29 = 0; \
+} while(0)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
+#define ELF_EXEC_PAGESIZE 16384
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE 0x08000000UL
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo. */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif
diff --git a/arch/frv/include/asm/emergency-restart.h b/arch/frv/include/asm/emergency-restart.h
new file mode 100644
index 00000000000..108d8c48e42
--- /dev/null
+++ b/arch/frv/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/frv/include/asm/errno.h b/arch/frv/include/asm/errno.h
new file mode 100644
index 00000000000..d010795ceef
--- /dev/null
+++ b/arch/frv/include/asm/errno.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#endif /* _ASM_ERRNO_H */
+
diff --git a/arch/frv/include/asm/fb.h b/arch/frv/include/asm/fb.h
new file mode 100644
index 00000000000..c7df3803099
--- /dev/null
+++ b/arch/frv/include/asm/fb.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_FB_H_
+#define _ASM_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* _ASM_FB_H_ */
diff --git a/arch/frv/include/asm/fcntl.h b/arch/frv/include/asm/fcntl.h
new file mode 100644
index 00000000000..46ab12db573
--- /dev/null
+++ b/arch/frv/include/asm/fcntl.h
@@ -0,0 +1 @@
+#include <asm-generic/fcntl.h>
diff --git a/arch/frv/include/asm/fpu.h b/arch/frv/include/asm/fpu.h
new file mode 100644
index 00000000000..d73c60b5664
--- /dev/null
+++ b/arch/frv/include/asm/fpu.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_FPU_H
+#define __ASM_FPU_H
+
+
+/*
+ * MAX floating point unit state size (FSAVE/FRESTORE)
+ */
+
+#define kernel_fpu_end() do { asm volatile("bar":::"memory"); preempt_enable(); } while(0)
+
+#endif /* __ASM_FPU_H */
diff --git a/arch/frv/include/asm/ftrace.h b/arch/frv/include/asm/ftrace.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/arch/frv/include/asm/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
new file mode 100644
index 00000000000..4bea27f50a7
--- /dev/null
+++ b/arch/frv/include/asm/futex.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ return -ENOSYS;
+}
+
+#endif
+#endif
diff --git a/arch/frv/include/asm/gdb-stub.h b/arch/frv/include/asm/gdb-stub.h
new file mode 100644
index 00000000000..e6bedd0cd9a
--- /dev/null
+++ b/arch/frv/include/asm/gdb-stub.h
@@ -0,0 +1,146 @@
+/* gdb-stub.h: FRV GDB stub
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from asm-mips/gdb-stub.h (c) 1995 Andreas Busse
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_GDB_STUB_H
+#define __ASM_GDB_STUB_H
+
+#undef GDBSTUB_DEBUG_IO
+#undef GDBSTUB_DEBUG_PROTOCOL
+
+#include <asm/ptrace.h>
+
+/*
+ * important register numbers in GDB protocol
+ * - GR0, GR1, GR2, GR3, GR4, GR5, GR6, GR7,
+ * - GR8, GR9, GR10, GR11, GR12, GR13, GR14, GR15,
+ * - GR16, GR17, GR18, GR19, GR20, GR21, GR22, GR23,
+ * - GR24, GR25, GR26, GR27, GR28, GR29, GR30, GR31,
+ * - GR32, GR33, GR34, GR35, GR36, GR37, GR38, GR39,
+ * - GR40, GR41, GR42, GR43, GR44, GR45, GR46, GR47,
+ * - GR48, GR49, GR50, GR51, GR52, GR53, GR54, GR55,
+ * - GR56, GR57, GR58, GR59, GR60, GR61, GR62, GR63,
+ * - FR0, FR1, FR2, FR3, FR4, FR5, FR6, FR7,
+ * - FR8, FR9, FR10, FR11, FR12, FR13, FR14, FR15,
+ * - FR16, FR17, FR18, FR19, FR20, FR21, FR22, FR23,
+ * - FR24, FR25, FR26, FR27, FR28, FR29, FR30, FR31,
+ * - FR32, FR33, FR34, FR35, FR36, FR37, FR38, FR39,
+ * - FR40, FR41, FR42, FR43, FR44, FR45, FR46, FR47,
+ * - FR48, FR49, FR50, FR51, FR52, FR53, FR54, FR55,
+ * - FR56, FR57, FR58, FR59, FR60, FR61, FR62, FR63,
+ * - PC, PSR, CCR, CCCR,
+ * - _X132, _X133, _X134
+ * - TBR, BRR, DBAR0, DBAR1, DBAR2, DBAR3,
+ * - SCR0, SCR1, SCR2, SCR3,
+ * - LR, LCR,
+ * - IACC0H, IACC0L,
+ * - FSR0,
+ * - ACC0, ACC1, ACC2, ACC3, ACC4, ACC5, ACC6, ACC7,
+ * - ACCG0123, ACCG4567,
+ * - MSR0, MSR1,
+ * - GNER0, GNER1,
+ * - FNER0, FNER1,
+ */
+#define GDB_REG_GR(N) (N)
+#define GDB_REG_FR(N) (64+(N))
+#define GDB_REG_PC 128
+#define GDB_REG_PSR 129
+#define GDB_REG_CCR 130
+#define GDB_REG_CCCR 131
+#define GDB_REG_TBR 135
+#define GDB_REG_BRR 136
+#define GDB_REG_DBAR(N) (137+(N))
+#define GDB_REG_SCR(N) (141+(N))
+#define GDB_REG_LR 145
+#define GDB_REG_LCR 146
+#define GDB_REG_FSR0 149
+#define GDB_REG_ACC(N) (150+(N))
+#define GDB_REG_ACCG(N) (158+(N)/4)
+#define GDB_REG_MSR(N) (160+(N))
+#define GDB_REG_GNER(N) (162+(N))
+#define GDB_REG_FNER(N) (164+(N))
+
+#define GDB_REG_SP GDB_REG_GR(1)
+#define GDB_REG_FP GDB_REG_GR(2)
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+/*
+ * Prototypes
+ */
+extern void show_registers_only(struct pt_regs *regs);
+
+extern void gdbstub_init(void);
+extern void gdbstub(int type);
+extern void gdbstub_exit(int status);
+
+extern void gdbstub_io_init(void);
+extern void gdbstub_set_baud(unsigned baud);
+extern int gdbstub_rx_char(unsigned char *_ch, int nonblock);
+extern void gdbstub_tx_char(unsigned char ch);
+extern void gdbstub_tx_flush(void);
+extern void gdbstub_do_rx(void);
+
+extern asmlinkage void __debug_stub_init_break(void);
+extern asmlinkage void __break_hijack_kernel_event(void);
+extern asmlinkage void __break_hijack_kernel_event_breaks_here(void);
+
+extern asmlinkage void gdbstub_rx_handler(void);
+extern asmlinkage void gdbstub_rx_irq(void);
+extern asmlinkage void gdbstub_intercept(void);
+
+extern uint32_t __entry_usertrap_table[];
+extern uint32_t __entry_kerneltrap_table[];
+
+extern volatile u8 gdbstub_rx_buffer[PAGE_SIZE];
+extern volatile u32 gdbstub_rx_inp;
+extern volatile u32 gdbstub_rx_outp;
+extern volatile u8 gdbstub_rx_overflow;
+extern u8 gdbstub_rx_unget;
+
+extern void gdbstub_printk(const char *fmt, ...);
+extern void debug_to_serial(const char *p, int n);
+extern void console_set_baud(unsigned baud);
+
+#ifdef GDBSTUB_DEBUG_IO
+#define gdbstub_io(FMT,...) gdbstub_printk(FMT, ##__VA_ARGS__)
+#else
+#define gdbstub_io(FMT,...) ({ 0; })
+#endif
+
+#ifdef GDBSTUB_DEBUG_PROTOCOL
+#define gdbstub_proto(FMT,...) gdbstub_printk(FMT,##__VA_ARGS__)
+#else
+#define gdbstub_proto(FMT,...) ({ 0; })
+#endif
+
+/*
+ * we dedicate GR31 to keeping a pointer to the gdbstub exception frame
+ * - gr31 is destroyed on entry to the gdbstub if !MMU
+ * - gr31 is saved in scr3 on entry to the gdbstub if in !MMU
+ */
+register struct frv_frame0 *__debug_frame0 asm("gr31");
+
+#define __debug_frame (&__debug_frame0->regs)
+#define __debug_user_context (&__debug_frame0->uc)
+#define __debug_regs (&__debug_frame0->debug)
+#define __debug_reg(X) ((unsigned long *) ((unsigned long) &__debug_frame0 + (X)))
+
+struct frv_debug_status {
+ unsigned long bpsr;
+ unsigned long dcr;
+ unsigned long brr;
+ unsigned long nmar;
+};
+
+extern struct frv_debug_status __debug_status;
+
+#endif /* _LANGUAGE_ASSEMBLY */
+#endif /* __ASM_GDB_STUB_H */
diff --git a/arch/frv/include/asm/gpio-regs.h b/arch/frv/include/asm/gpio-regs.h
new file mode 100644
index 00000000000..9edf5d5d4d3
--- /dev/null
+++ b/arch/frv/include/asm/gpio-regs.h
@@ -0,0 +1,116 @@
+/* gpio-regs.h: on-chip general purpose I/O registers
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_GPIO_REGS
+#define _ASM_GPIO_REGS
+
+#define __reg(ADDR) (*(volatile unsigned long *)(ADDR))
+
+#define __get_PDR() ({ __reg(0xfeff0400); })
+#define __set_PDR(V) do { __reg(0xfeff0400) = (V); mb(); } while(0)
+
+#define __get_GPDR() ({ __reg(0xfeff0408); })
+#define __set_GPDR(V) do { __reg(0xfeff0408) = (V); mb(); } while(0)
+
+#define __get_SIR() ({ __reg(0xfeff0410); })
+#define __set_SIR(V) do { __reg(0xfeff0410) = (V); mb(); } while(0)
+
+#define __get_SOR() ({ __reg(0xfeff0418); })
+#define __set_SOR(V) do { __reg(0xfeff0418) = (V); mb(); } while(0)
+
+#define __set_PDSR(V) do { __reg(0xfeff0420) = (V); mb(); } while(0)
+
+#define __set_PDCR(V) do { __reg(0xfeff0428) = (V); mb(); } while(0)
+
+#define __get_RSTR() ({ __reg(0xfeff0500); })
+#define __set_RSTR(V) do { __reg(0xfeff0500) = (V); mb(); } while(0)
+
+
+
+/* PDR definitions */
+#define PDR_GPIO_DATA(X) (1 << (X))
+
+/* GPDR definitions */
+#define GPDR_INPUT 0
+#define GPDR_OUTPUT 1
+#define GPDR_DREQ0_BIT 0x00001000
+#define GPDR_DREQ1_BIT 0x00008000
+#define GPDR_DREQ2_BIT 0x00040000
+#define GPDR_DREQ3_BIT 0x00080000
+#define GPDR_DREQ4_BIT 0x00004000
+#define GPDR_DREQ5_BIT 0x00020000
+#define GPDR_DREQ6_BIT 0x00100000
+#define GPDR_DREQ7_BIT 0x00200000
+#define GPDR_DACK0_BIT 0x00002000
+#define GPDR_DACK1_BIT 0x00010000
+#define GPDR_DACK2_BIT 0x00100000
+#define GPDR_DACK3_BIT 0x00200000
+#define GPDR_DONE0_BIT 0x00004000
+#define GPDR_DONE1_BIT 0x00020000
+#define GPDR_GPIO_DIR(X,D) ((D) << (X))
+
+/* SIR definitions */
+#define SIR_GPIO_INPUT 0
+#define SIR_DREQ7_INPUT 0x00200000
+#define SIR_DREQ6_INPUT 0x00100000
+#define SIR_DREQ3_INPUT 0x00080000
+#define SIR_DREQ2_INPUT 0x00040000
+#define SIR_DREQ5_INPUT 0x00020000
+#define SIR_DREQ1_INPUT 0x00008000
+#define SIR_DREQ4_INPUT 0x00004000
+#define SIR_DREQ0_INPUT 0x00001000
+#define SIR_RXD1_INPUT 0x00000400
+#define SIR_CTS0_INPUT 0x00000100
+#define SIR_RXD0_INPUT 0x00000040
+#define SIR_GATE1_INPUT 0x00000020
+#define SIR_GATE0_INPUT 0x00000010
+#define SIR_IRQ3_INPUT 0x00000008
+#define SIR_IRQ2_INPUT 0x00000004
+#define SIR_IRQ1_INPUT 0x00000002
+#define SIR_IRQ0_INPUT 0x00000001
+#define SIR_DREQ_BITS (SIR_DREQ0_INPUT | SIR_DREQ1_INPUT | \
+ SIR_DREQ2_INPUT | SIR_DREQ3_INPUT | \
+ SIR_DREQ4_INPUT | SIR_DREQ5_INPUT | \
+ SIR_DREQ6_INPUT | SIR_DREQ7_INPUT)
+
+/* SOR definitions */
+#define SOR_GPIO_OUTPUT 0
+#define SOR_DACK3_OUTPUT 0x00200000
+#define SOR_DACK2_OUTPUT 0x00100000
+#define SOR_DONE1_OUTPUT 0x00020000
+#define SOR_DACK1_OUTPUT 0x00010000
+#define SOR_DONE0_OUTPUT 0x00004000
+#define SOR_DACK0_OUTPUT 0x00002000
+#define SOR_TXD1_OUTPUT 0x00000800
+#define SOR_RTS0_OUTPUT 0x00000200
+#define SOR_TXD0_OUTPUT 0x00000080
+#define SOR_TOUT1_OUTPUT 0x00000020
+#define SOR_TOUT0_OUTPUT 0x00000010
+#define SOR_DONE_BITS (SOR_DONE0_OUTPUT | SOR_DONE1_OUTPUT)
+#define SOR_DACK_BITS (SOR_DACK0_OUTPUT | SOR_DACK1_OUTPUT | \
+ SOR_DACK2_OUTPUT | SOR_DACK3_OUTPUT)
+
+/* PDSR definitions */
+#define PDSR_UNCHANGED 0
+#define PDSR_SET_BIT(X) (1 << (X))
+
+/* PDCR definitions */
+#define PDCR_UNCHANGED 0
+#define PDCR_CLEAR_BIT(X) (1 << (X))
+
+/* RSTR definitions */
+/* Read Only */
+#define RSTR_POWERON 0x00000400
+#define RSTR_SOFTRESET_STATUS 0x00000100
+/* Write Only */
+#define RSTR_SOFTRESET 0x00000001
+
+#endif /* _ASM_GPIO_REGS */
diff --git a/arch/frv/include/asm/hardirq.h b/arch/frv/include/asm/hardirq.h
new file mode 100644
index 00000000000..c62833d6ebb
--- /dev/null
+++ b/arch/frv/include/asm/hardirq.h
@@ -0,0 +1,26 @@
+/* hardirq.h: FRV hardware IRQ management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/atomic.h>
+
+extern atomic_t irq_err_count;
+static inline void ack_bad_irq(int irq)
+{
+ atomic_inc(&irq_err_count);
+}
+#define ack_bad_irq ack_bad_irq
+
+#include <asm-generic/hardirq.h>
+
+#endif
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
new file mode 100644
index 00000000000..a8d6565d415
--- /dev/null
+++ b/arch/frv/include/asm/highmem.h
@@ -0,0 +1,167 @@
+/* highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from include/asm-i386/highmem.h
+ *
+ * See Documentation/frv/mmu-layout.txt for more information.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <asm/mem-layout.h>
+#include <asm/spr-regs.h>
+#include <asm/mb-regs.h>
+
+#define NR_TLB_LINES 64 /* number of lines in the TLB */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/interrupt.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define HIGHMEM_DEBUG 1
+#else
+#define HIGHMEM_DEBUG 0
+#endif
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+#define kmap_prot PAGE_KERNEL
+#define kmap_pte ______kmap_pte_in_TLB
+extern pte_t *pkmap_page_table;
+
+#define flush_cache_kmaps() do { } while (0)
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+#define LAST_PKMAP PTRS_PER_PTE
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+
+extern struct page *kmap_atomic_to_page(void *ptr);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+#define KMAP_ATOMIC_CACHE_DAMR 8
+
+#ifndef __ASSEMBLY__
+
+#define __kmap_atomic_primary(type, paddr, ampr) \
+({ \
+ unsigned long damlr, dampr; \
+ \
+ dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
+ \
+ if (type != __KM_CACHE) \
+ asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
+ else \
+ asm volatile("movgs %0,iampr"#ampr"\n" \
+ "movgs %0,dampr"#ampr"\n" \
+ :: "r"(dampr) : "memory" \
+ ); \
+ \
+ asm("movsg damlr"#ampr",%0" : "=r"(damlr)); \
+ \
+ /*printk("DAMR"#ampr": PRIM sl=%d L=%08lx P=%08lx\n", type, damlr, dampr);*/ \
+ \
+ (void *) damlr; \
+})
+
+#define __kmap_atomic_secondary(slot, paddr) \
+({ \
+ unsigned long damlr = KMAP_ATOMIC_SECONDARY_FRAME + (slot) * PAGE_SIZE; \
+ unsigned long dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
+ \
+ asm volatile("movgs %0,tplr \n" \
+ "movgs %1,tppr \n" \
+ "tlbpr %0,gr0,#2,#1" \
+ : : "r"(damlr), "r"(dampr) : "memory"); \
+ \
+ /*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/ \
+ \
+ (void *) damlr; \
+})
+
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
+{
+ unsigned long paddr;
+
+ pagefault_disable();
+ paddr = page_to_phys(page);
+
+ switch (type) {
+ case 0: return __kmap_atomic_primary(0, paddr, 2);
+ case 1: return __kmap_atomic_primary(1, paddr, 3);
+ case 2: return __kmap_atomic_primary(2, paddr, 4);
+ case 3: return __kmap_atomic_primary(3, paddr, 5);
+
+ default:
+ BUG();
+ return NULL;
+ }
+}
+
+#define __kunmap_atomic_primary(type, ampr) \
+do { \
+ asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
+ if (type == __KM_CACHE) \
+ asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
+} while(0)
+
+#define __kunmap_atomic_secondary(slot, vaddr) \
+do { \
+ asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
+} while(0)
+
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
+{
+ switch (type) {
+ case 0: __kunmap_atomic_primary(0, 2); break;
+ case 1: __kunmap_atomic_primary(1, 3); break;
+ case 2: __kunmap_atomic_primary(2, 4); break;
+ case 3: __kunmap_atomic_primary(3, 5); break;
+
+ default:
+ BUG();
+ }
+ pagefault_enable();
+}
+
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
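
Typical use of these mappings goes through the generic highmem API, which ends up in the __kmap_atomic()/__kunmap_atomic() helpers above; a minimal sketch (illustrative helper name):

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_highmem_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* mapped via a DAMR/TLB slot */

	memcpy(dst, vaddr, len);
	kunmap_atomic(vaddr);
}
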
diff --git a/arch/frv/include/asm/hw_irq.h b/arch/frv/include/asm/hw_irq.h
new file mode 100644
index 00000000000..522ad37923d
--- /dev/null
+++ b/arch/frv/include/asm/hw_irq.h
@@ -0,0 +1,16 @@
+/* hw_irq.h: FR-V specific h/w IRQ stuff
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_HW_IRQ_H
+#define _ASM_HW_IRQ_H
+
+
+#endif /* _ASM_HW_IRQ_H */
diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h
new file mode 100644
index 00000000000..8cb50a2fbcb
--- /dev/null
+++ b/arch/frv/include/asm/io.h
@@ -0,0 +1,392 @@
+/* io.h: FRV I/O operations
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This gets interesting when talking to the PCI bus - the CPU is in big endian
+ * mode, the PCI bus is little endian and the hardware in the middle can do
+ * byte swapping
+ */
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/virtconvert.h>
+#include <asm/string.h>
+#include <asm/mb-regs.h>
+#include <asm-generic/pci_iomap.h>
+#include <linux/delay.h>
+
+/*
+ * swap functions are sometimes needed to interface little-endian hardware
+ */
+
+static inline unsigned short _swapw(unsigned short v)
+{
+ return ((v << 8) | (v >> 8));
+}
+
+static inline unsigned long _swapl(unsigned long v)
+{
+ return ((v << 24) | ((v & 0xff00) << 8) | ((v & 0xff0000) >> 8) | (v >> 24));
+}
+
+//#define __iormb() asm volatile("membar")
+//#define __iowmb() asm volatile("membar")
+
+#define __raw_readb __builtin_read8
+#define __raw_readw __builtin_read16
+#define __raw_readl __builtin_read32
+
+#define __raw_writeb(datum, addr) __builtin_write8(addr, datum)
+#define __raw_writew(datum, addr) __builtin_write16(addr, datum)
+#define __raw_writel(datum, addr) __builtin_write32(addr, datum)
+
+static inline void io_outsb(unsigned int addr, const void *buf, int len)
+{
+ unsigned long __ioaddr = (unsigned long) addr;
+ const uint8_t *bp = buf;
+
+ while (len--)
+ __builtin_write8((volatile void __iomem *) __ioaddr, *bp++);
+}
+
+static inline void io_outsw(unsigned int addr, const void *buf, int len)
+{
+ unsigned long __ioaddr = (unsigned long) addr;
+ const uint16_t *bp = buf;
+
+ while (len--)
+ __builtin_write16((volatile void __iomem *) __ioaddr, (*bp++));
+}
+
+extern void __outsl_ns(unsigned int addr, const void *buf, int len);
+extern void __outsl_sw(unsigned int addr, const void *buf, int len);
+static inline void __outsl(unsigned int addr, const void *buf, int len, int swap)
+{
+ unsigned long __ioaddr = (unsigned long) addr;
+
+ if (!swap)
+ __outsl_ns(__ioaddr, buf, len);
+ else
+ __outsl_sw(__ioaddr, buf, len);
+}
+
+static inline void io_insb(unsigned long addr, void *buf, int len)
+{
+ uint8_t *bp = buf;
+
+ while (len--)
+ *bp++ = __builtin_read8((volatile void __iomem *) addr);
+}
+
+static inline void io_insw(unsigned long addr, void *buf, int len)
+{
+ uint16_t *bp = buf;
+
+ while (len--)
+ *bp++ = __builtin_read16((volatile void __iomem *) addr);
+}
+
+extern void __insl_ns(unsigned long addr, void *buf, int len);
+extern void __insl_sw(unsigned long addr, void *buf, int len);
+static inline void __insl(unsigned long addr, void *buf, int len, int swap)
+{
+ if (!swap)
+ __insl_ns(addr, buf, len);
+ else
+ __insl_sw(addr, buf, len);
+}
+
+#define mmiowb() mb()
+
+/*
+ * make the short names macros so specific devices
+ * can override them as required
+ */
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ memset((void __force *) addr, val, count);
+}
+
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+ memcpy(dst, (void __force *) src, count);
+}
+
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ memcpy((void __force *) dst, src, count);
+}
+
+static inline uint8_t inb(unsigned long addr)
+{
+ return __builtin_read8((void __iomem *)addr);
+}
+
+static inline uint16_t inw(unsigned long addr)
+{
+ uint16_t ret = __builtin_read16((void __iomem *)addr);
+
+ if (__is_PCI_IO(addr))
+ ret = _swapw(ret);
+
+ return ret;
+}
+
+static inline uint32_t inl(unsigned long addr)
+{
+ uint32_t ret = __builtin_read32((void __iomem *)addr);
+
+ if (__is_PCI_IO(addr))
+ ret = _swapl(ret);
+
+ return ret;
+}
+
+static inline void outb(uint8_t datum, unsigned long addr)
+{
+ __builtin_write8((void __iomem *)addr, datum);
+}
+
+static inline void outw(uint16_t datum, unsigned long addr)
+{
+ if (__is_PCI_IO(addr))
+ datum = _swapw(datum);
+ __builtin_write16((void __iomem *)addr, datum);
+}
+
+static inline void outl(uint32_t datum, unsigned long addr)
+{
+ if (__is_PCI_IO(addr))
+ datum = _swapl(datum);
+ __builtin_write32((void __iomem *)addr, datum);
+}
+
+#define inb_p(addr) inb(addr)
+#define inw_p(addr) inw(addr)
+#define inl_p(addr) inl(addr)
+#define outb_p(x,addr) outb(x,addr)
+#define outw_p(x,addr) outw(x,addr)
+#define outl_p(x,addr) outl(x,addr)
+
+#define outsb(a,b,l) io_outsb(a,b,l)
+#define outsw(a,b,l) io_outsw(a,b,l)
+#define outsl(a,b,l) __outsl(a,b,l,0)
+
+#define insb(a,b,l) io_insb(a,b,l)
+#define insw(a,b,l) io_insw(a,b,l)
+#define insl(a,b,l) __insl(a,b,l,0)
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+static inline uint8_t readb(const volatile void __iomem *addr)
+{
+ return __builtin_read8((__force void volatile __iomem *) addr);
+}
+
+static inline uint16_t readw(const volatile void __iomem *addr)
+{
+ uint16_t ret = __builtin_read16((__force void volatile __iomem *)addr);
+
+ if (__is_PCI_MEM(addr))
+ ret = _swapw(ret);
+ return ret;
+}
+
+static inline uint32_t readl(const volatile void __iomem *addr)
+{
+ uint32_t ret = __builtin_read32((__force void volatile __iomem *)addr);
+
+ if (__is_PCI_MEM(addr))
+ ret = _swapl(ret);
+
+ return ret;
+}
+
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
+static inline void writeb(uint8_t datum, volatile void __iomem *addr)
+{
+ __builtin_write8(addr, datum);
+ if (__is_PCI_MEM(addr))
+ __flush_PCI_writes();
+}
+
+static inline void writew(uint16_t datum, volatile void __iomem *addr)
+{
+ if (__is_PCI_MEM(addr))
+ datum = _swapw(datum);
+
+ __builtin_write16(addr, datum);
+ if (__is_PCI_MEM(addr))
+ __flush_PCI_writes();
+}
+
+static inline void writel(uint32_t datum, volatile void __iomem *addr)
+{
+ if (__is_PCI_MEM(addr))
+ datum = _swapl(datum);
+
+ __builtin_write32(addr, datum);
+ if (__is_PCI_MEM(addr))
+ __flush_PCI_writes();
+}
+
+
+/* Values for nocacheflag and cmode */
+#define IOMAP_FULL_CACHING 0
+#define IOMAP_NOCACHE_SER 1
+#define IOMAP_NOCACHE_NONSER 2
+#define IOMAP_WRITETHROUGH 3
+
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
+
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
+}
+
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
+}
+
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
+}
+
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
+
+#define ioremap_wc ioremap_nocache
+
+extern void iounmap(void volatile __iomem *addr);
+
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *) port;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("membar" : : :"memory");
+}
+
+/*
+ * do appropriate I/O accesses for token type
+ */
+static inline unsigned int ioread8(void __iomem *p)
+{
+ return __builtin_read8(p);
+}
+
+static inline unsigned int ioread16(void __iomem *p)
+{
+ uint16_t ret = __builtin_read16(p);
+ if (__is_PCI_addr(p))
+ ret = _swapw(ret);
+ return ret;
+}
+
+static inline unsigned int ioread32(void __iomem *p)
+{
+ uint32_t ret = __builtin_read32(p);
+ if (__is_PCI_addr(p))
+ ret = _swapl(ret);
+ return ret;
+}
+
+static inline void iowrite8(u8 val, void __iomem *p)
+{
+ __builtin_write8(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void iowrite16(u16 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+ val = _swapw(val);
+ __builtin_write16(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void iowrite32(u32 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+ val = _swapl(val);
+ __builtin_write32(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void ioread8_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insb((unsigned long) p, dst, count);
+}
+
+static inline void ioread16_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insw((unsigned long) p, dst, count);
+}
+
+static inline void ioread32_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ __insl_ns((unsigned long) p, dst, count);
+}
+
+static inline void iowrite8_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsb((unsigned long) p, src, count);
+}
+
+static inline void iowrite16_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsw((unsigned long) p, src, count);
+}
+
+static inline void iowrite32_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ __outsl_ns((unsigned long) p, src, count);
+}
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+}
+
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IO_H */
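
A typical consumer of these accessors maps a physical window and then uses readl()/writel() on it; a hedged sketch (the base address, size and register offset are made up):

#include <asm/io.h>

static u32 example_read_id(unsigned long phys_base)
{
	void __iomem *regs = ioremap_nocache(phys_base, 0x100);
	u32 id;

	if (!regs)
		return 0;
	id = readl(regs + 0x04);	/* hypothetical ID register offset */
	iounmap(regs);
	return id;
}
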
diff --git a/arch/frv/include/asm/ioctl.h b/arch/frv/include/asm/ioctl.h
new file mode 100644
index 00000000000..b279fe06dfe
--- /dev/null
+++ b/arch/frv/include/asm/ioctl.h
@@ -0,0 +1 @@
+#include <asm-generic/ioctl.h>
diff --git a/arch/frv/include/asm/ioctls.h b/arch/frv/include/asm/ioctls.h
new file mode 100644
index 00000000000..2f9fb436ec3
--- /dev/null
+++ b/arch/frv/include/asm/ioctls.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_IOCTLS_H__
+#define __ASM_IOCTLS_H__
+
+#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define FIOQSIZE 0x545E
+
+#include <asm-generic/ioctls.h>
+
+#endif /* __ASM_IOCTLS_H__ */
+
diff --git a/arch/frv/include/asm/ipcbuf.h b/arch/frv/include/asm/ipcbuf.h
new file mode 100644
index 00000000000..84c7e51cb6d
--- /dev/null
+++ b/arch/frv/include/asm/ipcbuf.h
@@ -0,0 +1 @@
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/frv/include/asm/irc-regs.h b/arch/frv/include/asm/irc-regs.h
new file mode 100644
index 00000000000..afa30aeacc8
--- /dev/null
+++ b/arch/frv/include/asm/irc-regs.h
@@ -0,0 +1,53 @@
+/* irc-regs.h: on-chip interrupt controller registers
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRC_REGS
+#define _ASM_IRC_REGS
+
+#define __reg(ADDR) (*(volatile unsigned long *)(ADDR))
+
+#define __get_TM0() ({ __reg(0xfeff9800); })
+#define __get_TM1() ({ __reg(0xfeff9808); })
+#define __set_TM1(V) do { __reg(0xfeff9808) = (V); mb(); } while(0)
+
+#define __set_TM1x(XI,V) \
+do { \
+ int shift = (XI) * 2 + 16; \
+ unsigned long tm1 = __reg(0xfeff9808); \
+ tm1 &= ~(0x3 << shift); \
+ tm1 |= (V) << shift; \
+ __reg(0xfeff9808) = tm1; \
+ mb(); \
+} while(0)
+
+#define __get_RS(C) ({ (__reg(0xfeff9810) >> ((C)+16)) & 1; })
+
+#define __clr_RC(C) do { __reg(0xfeff9818) = 1 << ((C)+16); mb(); } while(0)
+
+#define __get_MASK(C) ({ (__reg(0xfeff9820) >> ((C)+16)) & 1; })
+#define __set_MASK(C) do { __reg(0xfeff9820) |= 1 << ((C)+16); mb(); } while(0)
+#define __clr_MASK(C) do { __reg(0xfeff9820) &= ~(1 << ((C)+16)); mb(); } while(0)
+
+#define __get_MASK_all() __get_MASK(0)
+#define __set_MASK_all() __set_MASK(0)
+#define __clr_MASK_all() __clr_MASK(0)
+
+#define __get_IRL() ({ (__reg(0xfeff9828) >> 16) & 0xf; })
+#define __clr_IRL() do { __reg(0xfeff9828) = 0x100000; mb(); } while(0)
+
+#define __get_IRR(N) ({ __reg(0xfeff9840 + (N) * 8); })
+#define __set_IRR(N,V) do { __reg(0xfeff9840 + (N) * 8) = (V); } while(0)
+
+#define __get_IITMR(N) ({ __reg(0xfeff9880 + (N) * 8); })
+#define __set_IITMR(N,V) do { __reg(0xfeff9880 + (N) * 8) = (V); } while(0)
+
+
+#endif /* _ASM_IRC_REGS */
diff --git a/arch/frv/include/asm/irq.h b/arch/frv/include/asm/irq.h
new file mode 100644
index 00000000000..3a66ebd754b
--- /dev/null
+++ b/arch/frv/include/asm/irq.h
@@ -0,0 +1,30 @@
+/* irq.h: FRV IRQ definitions
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQ_H_
+#define _ASM_IRQ_H_
+
+#define NR_IRQS 48
+#define IRQ_BASE_CPU (0 * 16)
+#define IRQ_BASE_FPGA (1 * 16)
+#define IRQ_BASE_MB93493 (2 * 16)
+
+/* probe returns a 32-bit IRQ mask:-/ */
+#define MIN_PROBE_IRQ (NR_IRQS - 32)
+
+#ifndef __ASSEMBLY__
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
+#endif
+
+#endif /* _ASM_IRQ_H_ */
diff --git a/arch/frv/include/asm/irq_regs.h b/arch/frv/include/asm/irq_regs.h
new file mode 100644
index 00000000000..d22e83289ad
--- /dev/null
+++ b/arch/frv/include/asm/irq_regs.h
@@ -0,0 +1,27 @@
+/* FRV per-CPU frame pointer holder
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQ_REGS_H
+#define _ASM_IRQ_REGS_H
+
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack
+ * - on FRV, GR28 is dedicated to keeping a pointer to the current exception
+ * frame
+ */
+#define ARCH_HAS_OWN_IRQ_REGS
+
+#ifndef __ASSEMBLY__
+#define get_irq_regs() (__frame)
+#endif
+
+#endif /* _ASM_IRQ_REGS_H */
diff --git a/arch/frv/include/asm/irqflags.h b/arch/frv/include/asm/irqflags.h
new file mode 100644
index 00000000000..82f0b5363f4
--- /dev/null
+++ b/arch/frv/include/asm/irqflags.h
@@ -0,0 +1,158 @@
+/* FR-V interrupt handling
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+/*
+ * interrupt flag manipulation
+ * - use virtual interrupt management since touching the PSR is slow
+ * - ICC2.Z: T if interrupts virtually disabled
+ * - ICC2.C: F if interrupts really disabled
+ * - if Z==1 upon interrupt:
+ * - C is set to 0
+ * - interrupts are really disabled
+ * - entry.S returns immediately
+ * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
+ * - if taken, the trap:
+ * - sets ICC2.C
+ * - enables interrupts
+ */
+static inline void arch_local_irq_disable(void)
+{
+ /* set Z flag, but don't change the C flag */
+ asm volatile(" andcc gr0,gr0,gr0,icc2 \n"
+ :
+ :
+ : "memory", "icc2"
+ );
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ /* clear Z flag and then test the C flag */
+ asm volatile(" oricc gr0,#1,gr0,icc2 \n"
+ " tihi icc2,gr0,#2 \n"
+ :
+ :
+ : "memory", "icc2"
+ );
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ asm volatile("movsg ccr,%0"
+ : "=r"(flags)
+ :
+ : "memory");
+
+ /* shift ICC2.Z to bit 0 */
+ flags >>= 26;
+
+ /* make flags 1 if interrupts disabled, 0 otherwise */
+ return flags & 1UL;
+
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	/* load the Z flag by turning a 1 (interrupts were disabled) into 0,
+	 * thus setting the Z flag but not the C flag */
+ asm volatile(" xoricc %0,#1,gr0,icc2 \n"
+ /* then trap if Z=0 and C=0 */
+ " tihi icc2,gr0,#2 \n"
+ :
+ : "r"(flags)
+ : "memory", "icc2"
+ );
+
+}
+
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
+{
+ return flags;
+}
+
+static inline bool arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/*
+ * real interrupt flag manipulation
+ */
+#define __arch_local_irq_disable() \
+do { \
+ unsigned long psr; \
+ asm volatile(" movsg psr,%0 \n" \
+ " andi %0,%2,%0 \n" \
+ " ori %0,%1,%0 \n" \
+ " movgs %0,psr \n" \
+ : "=r"(psr) \
+ : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
+ : "memory"); \
+} while (0)
+
+#define __arch_local_irq_enable() \
+do { \
+ unsigned long psr; \
+ asm volatile(" movsg psr,%0 \n" \
+ " andi %0,%1,%0 \n" \
+ " movgs %0,psr \n" \
+ : "=r"(psr) \
+ : "i" (~PSR_PIL) \
+ : "memory"); \
+} while (0)
+
+#define __arch_local_save_flags(flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ asm("movsg psr,%0" \
+ : "=r"(flags) \
+ : \
+ : "memory"); \
+} while (0)
+
+#define __arch_local_irq_save(flags) \
+do { \
+ unsigned long npsr; \
+ typecheck(unsigned long, flags); \
+ asm volatile(" movsg psr,%0 \n" \
+ " andi %0,%3,%1 \n" \
+ " ori %1,%2,%1 \n" \
+ " movgs %1,psr \n" \
+ : "=r"(flags), "=r"(npsr) \
+ : "i" (PSR_PIL_14), "i" (~PSR_PIL) \
+ : "memory"); \
+} while (0)
+
+#define __arch_local_irq_restore(flags) \
+do { \
+ typecheck(unsigned long, flags); \
+ asm volatile(" movgs %0,psr \n" \
+ : \
+ : "r" (flags) \
+ : "memory"); \
+} while (0)
+
+#define __arch_irqs_disabled() \
+ ((__get_PSR() & PSR_PIL) >= PSR_PIL_14)
+
+#endif /* _ASM_IRQFLAGS_H */
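Kernel code normally reaches the primitives above through the generic local_irq_save()/local_irq_restore() wrappers; a minimal sketch of the save/restore pattern built directly on them (the function and counter names are hypothetical, not part of this patch):

#include <asm/irqflags.h>

static unsigned long hypothetical_counter;

static void hypothetical_increment(void)
{
	unsigned long flags;

	/* flags ends up as 1 if interrupts were already virtually disabled */
	flags = arch_local_irq_save();
	hypothetical_counter++;
	/* restores the previous Z flag; only traps to re-enable if needed */
	arch_local_irq_restore(flags);
}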
diff --git a/arch/frv/include/asm/kdebug.h b/arch/frv/include/asm/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/arch/frv/include/asm/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
new file mode 100644
index 00000000000..f8e16b2a580
--- /dev/null
+++ b/arch/frv/include/asm/kmap_types.h
@@ -0,0 +1,29 @@
+
+#ifndef _ASM_KMAP_TYPES_H
+#define _ASM_KMAP_TYPES_H
+
+enum km_type {
+ /* arch specific kmaps - change the numbers attached to these at your peril */
+ __KM_CACHE, /* cache flush page attachment point */
+ __KM_PGD, /* current page directory */
+ __KM_ITLB_PTD, /* current instruction TLB miss page table lookup */
+ __KM_DTLB_PTD, /* current data TLB miss page table lookup */
+
+ /* general kmaps */
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_TYPE_NR
+};
+
+#endif
diff --git a/arch/frv/include/asm/linkage.h b/arch/frv/include/asm/linkage.h
new file mode 100644
index 00000000000..636c1bced7d
--- /dev/null
+++ b/arch/frv/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/frv/include/asm/local.h b/arch/frv/include/asm/local.h
new file mode 100644
index 00000000000..c27bdf04630
--- /dev/null
+++ b/arch/frv/include/asm/local.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_LOCAL_H
+#define _ASM_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* _ASM_LOCAL_H */
diff --git a/arch/frv/include/asm/local64.h b/arch/frv/include/asm/local64.h
new file mode 100644
index 00000000000..36c93b5cc23
--- /dev/null
+++ b/arch/frv/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/arch/frv/include/asm/math-emu.h b/arch/frv/include/asm/math-emu.h
new file mode 100644
index 00000000000..0c8f731b218
--- /dev/null
+++ b/arch/frv/include/asm/math-emu.h
@@ -0,0 +1,301 @@
+#ifndef _ASM_MATH_EMU_H
+#define _ASM_MATH_EMU_H
+
+#include <asm/setup.h>
+#include <linux/linkage.h>
+
+/* Status Register bits */
+
+/* accrued exception bits */
+#define FPSR_AEXC_INEX 3
+#define FPSR_AEXC_DZ 4
+#define FPSR_AEXC_UNFL 5
+#define FPSR_AEXC_OVFL 6
+#define FPSR_AEXC_IOP 7
+
+/* exception status bits */
+#define FPSR_EXC_INEX1 8
+#define FPSR_EXC_INEX2 9
+#define FPSR_EXC_DZ 10
+#define FPSR_EXC_UNFL 11
+#define FPSR_EXC_OVFL 12
+#define FPSR_EXC_OPERR 13
+#define FPSR_EXC_SNAN 14
+#define FPSR_EXC_BSUN 15
+
+/* quotient byte, assumes big-endian, of course */
+#define FPSR_QUOTIENT(fpsr) (*((signed char *) &(fpsr) + 1))
+
+/* condition code bits */
+#define FPSR_CC_NAN 24
+#define FPSR_CC_INF 25
+#define FPSR_CC_Z 26
+#define FPSR_CC_NEG 27
+
+
+/* Control register bits */
+
+/* rounding mode */
+#define FPCR_ROUND_RN 0 /* round to nearest/even */
+#define FPCR_ROUND_RZ 1 /* round to zero */
+#define FPCR_ROUND_RM 2 /* minus infinity */
+#define FPCR_ROUND_RP 3 /* plus infinity */
+
+/* rounding precision */
+#define FPCR_PRECISION_X 0 /* long double */
+#define FPCR_PRECISION_S 1 /* double */
+#define FPCR_PRECISION_D 2 /* float */
+
+
+/* Flags to select the debugging output */
+#define PDECODE 0
+#define PEXECUTE 1
+#define PCONV 2
+#define PNORM 3
+#define PREGISTER 4
+#define PINSTR 5
+#define PUNIMPL 6
+#define PMOVEM 7
+
+#define PMDECODE (1<<PDECODE)
+#define PMEXECUTE (1<<PEXECUTE)
+#define PMCONV (1<<PCONV)
+#define PMNORM (1<<PNORM)
+#define PMREGISTER (1<<PREGISTER)
+#define PMINSTR (1<<PINSTR)
+#define PMUNIMPL (1<<PUNIMPL)
+#define PMMOVEM (1<<PMOVEM)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+union fp_mant64 {
+ unsigned long long m64;
+ unsigned long m32[2];
+};
+
+union fp_mant128 {
+ unsigned long long m64[2];
+ unsigned long m32[4];
+};
+
+/* internal representation of extended fp numbers */
+struct fp_ext {
+ unsigned char lowmant;
+ unsigned char sign;
+ unsigned short exp;
+ union fp_mant64 mant;
+};
+
+/* C representation of FPU registers */
+/* NOTE: if you change this, you have to change the assembler offsets
+ below and the size in <asm/fpu.h>, too */
+struct fp_data {
+ struct fp_ext fpreg[8];
+ unsigned int fpcr;
+ unsigned int fpsr;
+ unsigned int fpiar;
+ unsigned short prec;
+ unsigned short rnd;
+ struct fp_ext temp[2];
+};
+
+#if FPU_EMU_DEBUG
+extern unsigned int fp_debugprint;
+
+#define dprint(bit, fmt, args...) ({ \
+ if (fp_debugprint & (1 << (bit))) \
+ printk(fmt, ## args); \
+})
+#else
+#define dprint(bit, fmt, args...)
+#endif
+
+#define uprint(str) ({ \
+ static int __count = 3; \
+ \
+ if (__count > 0) { \
+ printk("You just hit an unimplemented " \
+ "fpu instruction (%s)\n", str); \
+ printk("Please report this to ....\n"); \
+ __count--; \
+ } \
+})
+
+#define FPDATA ((struct fp_data *)current->thread.fp)
+
+#else /* __ASSEMBLY__ */
+
+#define FPDATA %a2
+
+/* offsets from the base register to the floating point data in the task struct */
+#define FPD_FPREG (TASK_THREAD+THREAD_FPREG+0)
+#define FPD_FPCR (TASK_THREAD+THREAD_FPREG+96)
+#define FPD_FPSR (TASK_THREAD+THREAD_FPREG+100)
+#define FPD_FPIAR (TASK_THREAD+THREAD_FPREG+104)
+#define FPD_PREC (TASK_THREAD+THREAD_FPREG+108)
+#define FPD_RND (TASK_THREAD+THREAD_FPREG+110)
+#define FPD_TEMPFP1 (TASK_THREAD+THREAD_FPREG+112)
+#define FPD_TEMPFP2 (TASK_THREAD+THREAD_FPREG+124)
+#define FPD_SIZEOF (TASK_THREAD+THREAD_FPREG+136)
+
+/* offsets on the stack to access saved registers;
+ * these are only used during instruction decoding,
+ * where we always know how deep we are on the stack.
+ */
+#define FPS_DO (PT_D0)
+#define FPS_D1 (PT_D1)
+#define FPS_D2 (PT_D2)
+#define FPS_A0 (PT_A0)
+#define FPS_A1 (PT_A1)
+#define FPS_A2 (PT_A2)
+#define FPS_SR (PT_SR)
+#define FPS_PC (PT_PC)
+#define FPS_EA (PT_PC+6)
+#define FPS_PC2 (PT_PC+10)
+
+.macro fp_get_fp_reg
+ lea (FPD_FPREG,FPDATA,%d0.w*4),%a0
+ lea (%a0,%d0.w*8),%a0
+.endm
+
+/* Macros used to get/put the current program counter.
+ * The 020/030 use a different stack frame than the 040/060; on the
+ * 040/060 the return pc already points to the next location,
+ * so this only needs to be modified for jump instructions.
+ */
+.macro fp_get_pc dest
+ move.l (FPS_PC+4,%sp),\dest
+.endm
+
+.macro fp_put_pc src,jump=0
+ move.l \src,(FPS_PC+4,%sp)
+.endm
+
+.macro fp_get_instr_data f,s,dest,label
+ getuser \f,%sp@(FPS_PC+4)@(0),\dest,\label,%sp@(FPS_PC+4)
+ addq.l #\s,%sp@(FPS_PC+4)
+.endm
+
+.macro fp_get_instr_word dest,label,addr
+ fp_get_instr_data w,2,\dest,\label,\addr
+.endm
+
+.macro fp_get_instr_long dest,label,addr
+ fp_get_instr_data l,4,\dest,\label,\addr
+.endm
+
+/* These macros are used to read from/write to user space.
+ * On error we jump to the fixup section, load the fault
+ * address into %a0 and jump to the exit.
+ * (derived from <asm/uaccess.h>)
+ */
+.macro getuser size,src,dest,label,addr
+| printf ,"[\size<%08x]",1,\addr
+.Lu1\@: moves\size \src,\dest
+
+ .section .fixup,"ax"
+ .even
+.Lu2\@: move.l \addr,%a0
+ jra \label
+ .previous
+
+ .section __ex_table,"a"
+ .align 4
+ .long .Lu1\@,.Lu2\@
+ .previous
+.endm
+
+.macro putuser size,src,dest,label,addr
+| printf ,"[\size>%08x]",1,\addr
+.Lu1\@: moves\size \src,\dest
+.Lu2\@:
+
+ .section .fixup,"ax"
+ .even
+.Lu3\@: move.l \addr,%a0
+ jra \label
+ .previous
+
+ .section __ex_table,"a"
+ .align 4
+ .long .Lu1\@,.Lu3\@
+ .long .Lu2\@,.Lu3\@
+ .previous
+.endm
+
+
+.macro movestack nr,arg1,arg2,arg3,arg4,arg5
+ .if \nr
+ movestack (\nr-1),\arg2,\arg3,\arg4,\arg5
+ move.l \arg1,-(%sp)
+ .endif
+.endm
+
+.macro printf bit=-1,string,nr=0,arg1,arg2,arg3,arg4,arg5
+#ifdef FPU_EMU_DEBUG
+ .data
+.Lpdata\@:
+ .string "\string"
+ .previous
+
+ movem.l %d0/%d1/%a0/%a1,-(%sp)
+ .if \bit+1
+#if 0
+ moveq #\bit,%d0
+ andw #7,%d0
+ btst %d0,fp_debugprint+((31-\bit)/8)
+#else
+ btst #\bit,fp_debugprint+((31-\bit)/8)
+#endif
+ jeq .Lpskip\@
+ .endif
+ movestack \nr,\arg1,\arg2,\arg3,\arg4,\arg5
+ pea .Lpdata\@
+ jsr printk
+ lea ((\nr+1)*4,%sp),%sp
+.Lpskip\@:
+ movem.l (%sp)+,%d0/%d1/%a0/%a1
+#endif
+.endm
+
+.macro printx bit,fp
+#ifdef FPU_EMU_DEBUG
+ movem.l %d0/%a0,-(%sp)
+ lea \fp,%a0
+#if 0
+ moveq #'+',%d0
+ tst.w (%a0)
+ jeq .Lx1\@
+ moveq #'-',%d0
+.Lx1\@: printf \bit," %c",1,%d0
+ move.l (4,%a0),%d0
+ bclr #31,%d0
+ jne .Lx2\@
+ printf \bit,"0."
+ jra .Lx3\@
+.Lx2\@: printf \bit,"1."
+.Lx3\@: printf \bit,"%08x%08x",2,%d0,%a0@(8)
+ move.w (2,%a0),%d0
+ ext.l %d0
+ printf \bit,"E%04x",1,%d0
+#else
+ printf \bit," %08x%08x%08x",3,%a0@,%a0@(4),%a0@(8)
+#endif
+ movem.l (%sp)+,%d0/%a0
+#endif
+.endm
+
+.macro debug instr,args
+#ifdef FPU_EMU_DEBUG
+ \instr \args
+#endif
+.endm
+
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_FRV_MATH_EMU_H */
+
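The dprint() macro above gates its output on the matching bit of fp_debugprint, and uprint() rate-limits complaints about unimplemented instructions. A hedged sketch of how an emulator routine might use them (the helper name, arguments and condition are hypothetical, and FPU_EMU_DEBUG is assumed to be enabled):

#include <asm/math-emu.h>

/* hypothetical decode helper: trace via dprint(), complain via uprint() */
static void hypothetical_trace_decode(unsigned int opcode, int recognised)
{
	/* printed only when the PDECODE bit is set in fp_debugprint */
	dprint(PDECODE, "decoding fp opcode %08x\n", opcode);

	if (!recognised)
		uprint("hypothetical-insn");
}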
diff --git a/arch/frv/include/asm/mb-regs.h b/arch/frv/include/asm/mb-regs.h
new file mode 100644
index 00000000000..219e5f926f1
--- /dev/null
+++ b/arch/frv/include/asm/mb-regs.h
@@ -0,0 +1,200 @@
+/* mb-regs.h: motherboard registers
+ *
+ * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB_REGS_H
+#define _ASM_MB_REGS_H
+
+#include <asm/cpu-irqs.h>
+#include <asm/sections.h>
+#include <asm/mem-layout.h>
+
+#ifndef __ASSEMBLY__
+/* gcc builtins, annotated */
+
+unsigned long __builtin_read8(volatile void __iomem *);
+unsigned long __builtin_read16(volatile void __iomem *);
+unsigned long __builtin_read32(volatile void __iomem *);
+void __builtin_write8(volatile void __iomem *, unsigned char);
+void __builtin_write16(volatile void __iomem *, unsigned short);
+void __builtin_write32(volatile void __iomem *, unsigned long);
+#endif
+
+#define __region_IO KERNEL_IO_START /* the region from 0xe0000000 to 0xffffffff has suitable
+ * protection laid over the top for use in memory-mapped
+ * I/O
+ */
+
+#define __region_CS0 0xff000000 /* Boot ROMs area */
+
+#ifdef CONFIG_MB93091_VDK
+/*
+ * VDK motherboard and CPU card specific stuff
+ */
+
+#include <asm/mb93091-fpga-irqs.h>
+
+#define IRQ_CPU_MB93493_0 IRQ_CPU_EXTERNAL0
+#define IRQ_CPU_MB93493_1 IRQ_CPU_EXTERNAL1
+
+#define __region_CS2 0xe0000000 /* SLBUS/PCI I/O space */
+#define __region_CS2_M 0x0fffffff /* mask */
+#define __region_CS2_C 0x00000000 /* control */
+#define __region_CS5 0xf0000000 /* MB93493 CSC area (DAV daughter board) */
+#define __region_CS5_M 0x00ffffff
+#define __region_CS5_C 0x00010000
+#define __region_CS7 0xf1000000 /* CB70 CPU-card PCMCIA port I/O space */
+#define __region_CS7_M 0x00ffffff
+#define __region_CS7_C 0x00410701
+#define __region_CS1 0xfc000000 /* SLBUS/PCI bridge control registers */
+#define __region_CS1_M 0x000fffff
+#define __region_CS1_C 0x00000000
+#define __region_CS6 0xfc100000 /* CB70 CPU-card DM9000 LAN I/O space */
+#define __region_CS6_M 0x000fffff
+#define __region_CS6_C 0x00400707
+#define __region_CS3 0xfc200000 /* MB93493 CSR area (DAV daughter board) */
+#define __region_CS3_M 0x000fffff
+#define __region_CS3_C 0xc8100000
+#define __region_CS4 0xfd000000 /* CB70 CPU-card extra flash space */
+#define __region_CS4_M 0x00ffffff
+#define __region_CS4_C 0x00000f07
+
+#define __region_PCI_IO (__region_CS2 + 0x04000000UL)
+#define __region_PCI_MEM (__region_CS2 + 0x08000000UL)
+#define __flush_PCI_writes() \
+do { \
+ __builtin_write8((volatile void __iomem *) __region_PCI_MEM, 0); \
+} while(0)
+
+#define __is_PCI_IO(addr) \
+ (((unsigned long)(addr) >> 24) - (__region_PCI_IO >> 24) < (0x04000000UL >> 24))
+
+#define __is_PCI_MEM(addr) \
+ ((unsigned long)(addr) - __region_PCI_MEM < 0x08000000UL)
+
+#define __is_PCI_addr(addr) \
+ ((unsigned long)(addr) - __region_PCI_IO < 0x0c000000UL)
+
+#define __get_CLKSW() ({ *(volatile unsigned long *)(__region_CS2 + 0x0130000cUL) & 0xffUL; })
+#define __get_CLKIN() (__get_CLKSW() * 125U * 100000U / 24U)
+
+#ifndef __ASSEMBLY__
+extern int __nongprelbss mb93090_mb00_detected;
+#endif
+
+#define __addr_LEDS() (__region_CS2 + 0x01200004UL)
+#ifdef CONFIG_MB93090_MB00
+#define __set_LEDS(X) \
+do { \
+ if (mb93090_mb00_detected) \
+ __builtin_write32((void __iomem *) __addr_LEDS(), ~(X)); \
+} while (0)
+#else
+#define __set_LEDS(X)
+#endif
+
+#define __addr_LCD() (__region_CS2 + 0x01200008UL)
+#define __get_LCD(B) __builtin_read32((volatile void __iomem *) (B))
+#define __set_LCD(B,X) __builtin_write32((volatile void __iomem *) (B), (X))
+
+#define LCD_D 0x000000ff /* LCD data bus */
+#define LCD_RW 0x00000100 /* LCD R/W signal */
+#define LCD_RS 0x00000200 /* LCD Register Select */
+#define LCD_E 0x00000400 /* LCD Start Enable Signal */
+
+#define LCD_CMD_CLEAR (LCD_E|0x001)
+#define LCD_CMD_HOME (LCD_E|0x002)
+#define LCD_CMD_CURSOR_INC (LCD_E|0x004)
+#define LCD_CMD_SCROLL_INC (LCD_E|0x005)
+#define LCD_CMD_CURSOR_DEC (LCD_E|0x006)
+#define LCD_CMD_SCROLL_DEC (LCD_E|0x007)
+#define LCD_CMD_OFF (LCD_E|0x008)
+#define LCD_CMD_ON(CRSR,BLINK) (LCD_E|0x00c|(CRSR<<1)|BLINK)
+#define LCD_CMD_CURSOR_MOVE_L (LCD_E|0x010)
+#define LCD_CMD_CURSOR_MOVE_R (LCD_E|0x014)
+#define LCD_CMD_DISPLAY_SHIFT_L (LCD_E|0x018)
+#define LCD_CMD_DISPLAY_SHIFT_R (LCD_E|0x01c)
+#define LCD_CMD_FUNCSET(DL,N,F) (LCD_E|0x020|(DL<<4)|(N<<3)|(F<<2))
+#define LCD_CMD_SET_CG_ADDR(X) (LCD_E|0x040|X)
+#define LCD_CMD_SET_DD_ADDR(X) (LCD_E|0x080|X)
+#define LCD_CMD_READ_BUSY (LCD_E|LCD_RW)
+#define LCD_DATA_WRITE(X) (LCD_E|LCD_RS|(X))
+#define LCD_DATA_READ (LCD_E|LCD_RS|LCD_RW)
+
+#else
+/*
+ * PDK unit specific stuff
+ */
+
+#include <asm/mb93093-fpga-irqs.h>
+
+#define IRQ_CPU_MB93493_0 IRQ_CPU_EXTERNAL0
+#define IRQ_CPU_MB93493_1 IRQ_CPU_EXTERNAL1
+
+#define __region_CS5 0xf0000000 /* MB93493 CSC area (DAV daughter board) */
+#define __region_CS5_M 0x00ffffff /* mask */
+#define __region_CS5_C 0x00010000 /* control */
+#define __region_CS2 0x20000000 /* FPGA registers */
+#define __region_CS2_M 0x000fffff
+#define __region_CS2_C 0x00000000
+#define __region_CS1 0xfc100000 /* LAN registers */
+#define __region_CS1_M 0x000fffff
+#define __region_CS1_C 0x00010404
+#define __region_CS3 0xfc200000 /* MB93493 CSR area (DAV daughter board) */
+#define __region_CS3_M 0x000fffff
+#define __region_CS3_C 0xc8000000
+#define __region_CS4 0xfd000000 /* extra ROMs area */
+#define __region_CS4_M 0x00ffffff
+#define __region_CS4_C 0x00000f07
+
+#define __region_CS6 0xfe000000 /* not used - hide behind CPU resource I/O regs */
+#define __region_CS6_M 0x000fffff
+#define __region_CS6_C 0x00000f07
+#define __region_CS7 0xfe000000 /* not used - hide behind CPU resource I/O regs */
+#define __region_CS7_M 0x000fffff
+#define __region_CS7_C 0x00000f07
+
+#define __is_PCI_IO(addr) 0 /* no PCI */
+#define __is_PCI_MEM(addr) 0
+#define __is_PCI_addr(addr) 0
+#define __region_PCI_IO 0
+#define __region_PCI_MEM 0
+#define __flush_PCI_writes() do { } while(0)
+
+#define __get_CLKSW() 0UL
+#define __get_CLKIN() 66000000UL
+
+#define __addr_LEDS() (__region_CS2 + 0x00000023UL)
+#define __set_LEDS(X) __builtin_write8((volatile void __iomem *) __addr_LEDS(), (X))
+
+#define __addr_FPGATR() (__region_CS2 + 0x00000030UL)
+#define __set_FPGATR(X) __builtin_write32((volatile void __iomem *) __addr_FPGATR(), (X))
+#define __get_FPGATR() __builtin_read32((volatile void __iomem *) __addr_FPGATR())
+
+#define MB93093_FPGA_FPGATR_AUDIO_CLK 0x00000003
+
+#define __set_FPGATR_AUDIO_CLK(V) \
+ __set_FPGATR((__get_FPGATR() & ~MB93093_FPGA_FPGATR_AUDIO_CLK) | (V))
+
+#define MB93093_FPGA_FPGATR_AUDIO_CLK_OFF 0x0
+#define MB93093_FPGA_FPGATR_AUDIO_CLK_11MHz 0x1
+#define MB93093_FPGA_FPGATR_AUDIO_CLK_12MHz 0x2
+#define MB93093_FPGA_FPGATR_AUDIO_CLK_02MHz 0x3
+
+#define MB93093_FPGA_SWR_PUSHSWMASK (0x1F<<26)
+#define MB93093_FPGA_SWR_PUSHSW4 (1<<29)
+
+#define __addr_FPGA_SWR ((volatile void __iomem *)(__region_CS2 + 0x28UL))
+#define __get_FPGA_PUSHSW1_5() (__builtin_read32(__addr_FPGA_SWR) & MB93093_FPGA_SWR_PUSHSWMASK)
+
+
+#endif
+
+#endif /* _ASM_MB_REGS_H */
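On a CONFIG_MB93091_VDK build the LED and LCD helpers above wrap __builtin_write32(); the following is only a sketch of the accessors (a real driver would also poll LCD_CMD_READ_BUSY and strobe the E line, and the function name is hypothetical):

#include <asm/mb-regs.h>

static void hypothetical_show_boot_pattern(void)
{
	/* light the low eight LEDs (the macro inverts the value for the hardware) */
	__set_LEDS(0xff);

	/* clear the LCD, then write one data byte */
	__set_LCD(__addr_LCD(), LCD_CMD_CLEAR);
	__set_LCD(__addr_LCD(), LCD_DATA_WRITE('F'));
}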
diff --git a/arch/frv/include/asm/mb86943a.h b/arch/frv/include/asm/mb86943a.h
new file mode 100644
index 00000000000..e87ef924bfb
--- /dev/null
+++ b/arch/frv/include/asm/mb86943a.h
@@ -0,0 +1,42 @@
+/* mb86943a.h: MB86943 SPARClite <-> PCI bridge registers
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB86943A_H
+#define _ASM_MB86943A_H
+
+#include <asm/mb-regs.h>
+
+#define __reg_MB86943_sl_ctl *(volatile uint32_t *) (__region_CS1 + 0x00)
+
+#define MB86943_SL_CTL_BUS_WIDTH_64 0x00000001
+#define MB86943_SL_CTL_AS_HOST 0x00000002
+#define MB86943_SL_CTL_DRCT_MASTER_SWAP 0x00000004
+#define MB86943_SL_CTL_DRCT_SLAVE_SWAP 0x00000008
+#define MB86943_SL_CTL_PCI_CONFIG_SWAP 0x00000010
+#define MB86943_SL_CTL_ECS0_ENABLE 0x00000020
+#define MB86943_SL_CTL_ECS1_ENABLE 0x00000040
+#define MB86943_SL_CTL_ECS2_ENABLE 0x00000080
+
+#define __reg_MB86943_ecs_ctl(N) *(volatile uint32_t *) (__region_CS1 + 0x08 + (0x08*(N)))
+#define __reg_MB86943_ecs_range(N) *(volatile uint32_t *) (__region_CS1 + 0x20 + (0x10*(N)))
+#define __reg_MB86943_ecs_base(N) *(volatile uint32_t *) (__region_CS1 + 0x28 + (0x10*(N)))
+
+#define __reg_MB86943_sl_pci_io_range *(volatile uint32_t *) (__region_CS1 + 0x50)
+#define __reg_MB86943_sl_pci_io_base *(volatile uint32_t *) (__region_CS1 + 0x58)
+#define __reg_MB86943_sl_pci_mem_range *(volatile uint32_t *) (__region_CS1 + 0x60)
+#define __reg_MB86943_sl_pci_mem_base *(volatile uint32_t *) (__region_CS1 + 0x68)
+#define __reg_MB86943_pci_sl_io_base *(volatile uint32_t *) (__region_CS1 + 0x70)
+#define __reg_MB86943_pci_sl_mem_base *(volatile uint32_t *) (__region_CS1 + 0x78)
+
+#define __reg_MB86943_pci_arbiter *(volatile uint32_t *) (__region_CS2 + 0x01300014)
+#define MB86943_PCIARB_EN 0x00000001
+
+#endif /* _ASM_MB86943A_H */
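The __reg_MB86943_* macros above expand to volatile dereferences into the CS1 window, so they can be read and written like ordinary lvalues. A sketch of a read-modify-write on the SL control register (the helper name is hypothetical):

#include <linux/types.h>
#include <asm/mb86943a.h>

static void hypothetical_enable_master_swap(void)
{
	uint32_t ctl = __reg_MB86943_sl_ctl;

	ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP;
	__reg_MB86943_sl_ctl = ctl;
}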
diff --git a/arch/frv/include/asm/mb93091-fpga-irqs.h b/arch/frv/include/asm/mb93091-fpga-irqs.h
new file mode 100644
index 00000000000..19778c5ba9d
--- /dev/null
+++ b/arch/frv/include/asm/mb93091-fpga-irqs.h
@@ -0,0 +1,42 @@
+/* mb93091-fpga-irqs.h: MB93091 CPU board FPGA IRQs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB93091_FPGA_IRQS_H
+#define _ASM_MB93091_FPGA_IRQS_H
+
+#include <asm/irq.h>
+
+#ifndef __ASSEMBLY__
+
+/* IRQ IDs presented to drivers */
+enum {
+ IRQ_FPGA__UNUSED = IRQ_BASE_FPGA,
+ IRQ_FPGA_SYSINT_BUS_EXPANSION_1,
+ IRQ_FPGA_SL_BUS_EXPANSION_2,
+ IRQ_FPGA_PCI_INTD,
+ IRQ_FPGA_PCI_INTC,
+ IRQ_FPGA_PCI_INTB,
+ IRQ_FPGA_PCI_INTA,
+ IRQ_FPGA_SL_BUS_EXPANSION_7,
+ IRQ_FPGA_SYSINT_BUS_EXPANSION_8,
+ IRQ_FPGA_SL_BUS_EXPANSION_9,
+ IRQ_FPGA_MB86943_PCI_INTA,
+ IRQ_FPGA_MB86943_SLBUS_SIDE,
+ IRQ_FPGA_RTL8029_INTA,
+ IRQ_FPGA_SYSINT_BUS_EXPANSION_13,
+ IRQ_FPGA_SL_BUS_EXPANSION_14,
+ IRQ_FPGA_NMI,
+};
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_MB93091_FPGA_IRQS_H */
diff --git a/arch/frv/include/asm/mb93093-fpga-irqs.h b/arch/frv/include/asm/mb93093-fpga-irqs.h
new file mode 100644
index 00000000000..590266b1a6d
--- /dev/null
+++ b/arch/frv/include/asm/mb93093-fpga-irqs.h
@@ -0,0 +1,29 @@
+/* mb93093-fpga-irqs.h: MB93093 CPU board FPGA IRQs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB93093_FPGA_IRQS_H
+#define _ASM_MB93093_FPGA_IRQS_H
+
+#include <asm/irq.h>
+
+#ifndef __ASSEMBLY__
+
+/* IRQ IDs presented to drivers */
+enum {
+ IRQ_FPGA_PUSH_BUTTON_SW1_5 = IRQ_BASE_FPGA + 8,
+ IRQ_FPGA_ROCKER_C_SW8 = IRQ_BASE_FPGA + 9,
+ IRQ_FPGA_ROCKER_C_SW9 = IRQ_BASE_FPGA + 10,
+};
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_MB93093_FPGA_IRQS_H */
diff --git a/arch/frv/include/asm/mb93493-irqs.h b/arch/frv/include/asm/mb93493-irqs.h
new file mode 100644
index 00000000000..82c7aeddd33
--- /dev/null
+++ b/arch/frv/include/asm/mb93493-irqs.h
@@ -0,0 +1,50 @@
+/* mb93493-irqs.h: MB93493 companion chip IRQs
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB93493_IRQS_H
+#define _ASM_MB93493_IRQS_H
+
+#include <asm/irq.h>
+
+#ifndef __ASSEMBLY__
+
+/* IRQ IDs presented to drivers */
+enum {
+ IRQ_MB93493_VDC = IRQ_BASE_MB93493 + 0,
+ IRQ_MB93493_VCC = IRQ_BASE_MB93493 + 1,
+ IRQ_MB93493_AUDIO_OUT = IRQ_BASE_MB93493 + 2,
+ IRQ_MB93493_I2C_0 = IRQ_BASE_MB93493 + 3,
+ IRQ_MB93493_I2C_1 = IRQ_BASE_MB93493 + 4,
+ IRQ_MB93493_USB = IRQ_BASE_MB93493 + 5,
+ IRQ_MB93493_LOCAL_BUS = IRQ_BASE_MB93493 + 7,
+ IRQ_MB93493_PCMCIA = IRQ_BASE_MB93493 + 8,
+ IRQ_MB93493_GPIO = IRQ_BASE_MB93493 + 9,
+ IRQ_MB93493_AUDIO_IN = IRQ_BASE_MB93493 + 10,
+};
+
+/* IRQ multiplexor mappings */
+#define ROUTE_VIA_IRQ0 0 /* route IRQ by way of CPU external IRQ 0 */
+#define ROUTE_VIA_IRQ1 1 /* route IRQ by way of CPU external IRQ 1 */
+
+#define IRQ_MB93493_VDC_ROUTE ROUTE_VIA_IRQ0
+#define IRQ_MB93493_VCC_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_AUDIO_OUT_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_I2C_0_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_I2C_1_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_USB_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_LOCAL_BUS_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_PCMCIA_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_GPIO_ROUTE ROUTE_VIA_IRQ1
+#define IRQ_MB93493_AUDIO_IN_ROUTE ROUTE_VIA_IRQ1
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_MB93493_IRQS_H */
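The enum above hands drivers ordinary Linux IRQ numbers, so they are claimed with the usual request_irq() call; a hedged sketch for the companion chip's USB interrupt (handler and wrapper names are hypothetical):

#include <linux/interrupt.h>
#include <asm/mb93493-irqs.h>

/* hypothetical handler wired to the MB93493 USB interrupt */
static irqreturn_t hypothetical_usb_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int hypothetical_claim_usb_irq(void *dev)
{
	return request_irq(IRQ_MB93493_USB, hypothetical_usb_isr, 0,
			   "hypothetical-usb", dev);
}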
diff --git a/arch/frv/include/asm/mb93493-regs.h b/arch/frv/include/asm/mb93493-regs.h
new file mode 100644
index 00000000000..8a1f6aac8cf
--- /dev/null
+++ b/arch/frv/include/asm/mb93493-regs.h
@@ -0,0 +1,281 @@
+/* mb93493-regs.h: MB93493 companion chip registers
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MB93493_REGS_H
+#define _ASM_MB93493_REGS_H
+
+#include <asm/mb-regs.h>
+#include <asm/mb93493-irqs.h>
+
+#define __addr_MB93493(X) ((volatile unsigned long *)(__region_CS3 + (X)))
+#define __get_MB93493(X) ({ *(volatile unsigned long *)(__region_CS3 + (X)); })
+
+#define __set_MB93493(X,V) \
+do { \
+ *(volatile unsigned long *)(__region_CS3 + (X)) = (V); mb(); \
+} while(0)
+
+#define __get_MB93493_STSR(X) __get_MB93493(0x3c0 + (X) * 4)
+#define __set_MB93493_STSR(X,V) __set_MB93493(0x3c0 + (X) * 4, (V))
+#define MB93493_STSR_EN
+
+#define __addr_MB93493_IQSR(X) __addr_MB93493(0x3d0 + (X) * 4)
+#define __get_MB93493_IQSR(X) __get_MB93493(0x3d0 + (X) * 4)
+#define __set_MB93493_IQSR(X,V) __set_MB93493(0x3d0 + (X) * 4, (V))
+
+#define __get_MB93493_DQSR(X) __get_MB93493(0x3e0 + (X) * 4)
+#define __set_MB93493_DQSR(X,V) __set_MB93493(0x3e0 + (X) * 4, (V))
+
+#define __get_MB93493_LBSER() __get_MB93493(0x3f0)
+#define __set_MB93493_LBSER(V) __set_MB93493(0x3f0, (V))
+
+#define MB93493_LBSER_VDC 0x00010000
+#define MB93493_LBSER_VCC 0x00020000
+#define MB93493_LBSER_AUDIO 0x00040000
+#define MB93493_LBSER_I2C_0 0x00080000
+#define MB93493_LBSER_I2C_1 0x00100000
+#define MB93493_LBSER_USB 0x00200000
+#define MB93493_LBSER_GPIO 0x00800000
+#define MB93493_LBSER_PCMCIA 0x01000000
+
+#define __get_MB93493_LBSR() __get_MB93493(0x3fc)
+#define __set_MB93493_LBSR(V) __set_MB93493(0x3fc, (V))
+
+/*
+ * video display controller
+ */
+#define __get_MB93493_VDC(X) __get_MB93493(MB93493_VDC_##X)
+#define __set_MB93493_VDC(X,V) __set_MB93493(MB93493_VDC_##X, (V))
+
+#define MB93493_VDC_RCURSOR 0x140 /* cursor position */
+#define MB93493_VDC_RCT1 0x144 /* cursor colour 1 */
+#define MB93493_VDC_RCT2 0x148 /* cursor colour 2 */
+#define MB93493_VDC_RHDC 0x150 /* horizontal display period */
+#define MB93493_VDC_RH_MARGINS 0x154 /* horizontal margin sizes */
+#define MB93493_VDC_RVDC 0x158 /* vertical display period */
+#define MB93493_VDC_RV_MARGINS 0x15c /* vertical margin sizes */
+#define MB93493_VDC_RC 0x170 /* VDC control */
+#define MB93493_VDC_RCLOCK 0x174 /* clock divider, DMA req delay */
+#define MB93493_VDC_RBLACK 0x178 /* black insert sizes */
+#define MB93493_VDC_RS 0x17c /* VDC status */
+
+#define __addr_MB93493_VDC_BCI(X) ({ (volatile unsigned long *)(__region_CS3 + 0x000 + (X)); })
+#define __addr_MB93493_VDC_TPO(X) (__region_CS3 + 0x1c0 + (X))
+
+#define VDC_TPO_WIDTH 32
+
+#define VDC_RC_DSR 0x00000080 /* VDC master reset */
+
+#define VDC_RS_IT 0x00060000 /* interrupt indicators */
+#define VDC_RS_IT_UNDERFLOW 0x00040000 /* - underflow event */
+#define VDC_RS_IT_VSYNC 0x00020000 /* - VSYNC event */
+#define VDC_RS_DFI 0x00010000 /* current interlace field number */
+#define VDC_RS_DFI_TOP 0x00000000 /* - top field */
+#define VDC_RS_DFI_BOTTOM 0x00010000 /* - bottom field */
+#define VDC_RS_DCSR 0x00000010 /* cursor state */
+#define VDC_RS_DCM 0x00000003 /* display mode */
+#define VDC_RS_DCM_DISABLED 0x00000000 /* - display disabled */
+#define VDC_RS_DCM_STOPPED 0x00000001 /* - VDC stopped */
+#define VDC_RS_DCM_FREERUNNING 0x00000002 /* - VDC free-running */
+#define VDC_RS_DCM_TRANSFERRING 0x00000003 /* - data being transferred to VDC */
+
+/*
+ * video capture controller
+ */
+#define __get_MB93493_VCC(X) __get_MB93493(MB93493_VCC_##X)
+#define __set_MB93493_VCC(X,V) __set_MB93493(MB93493_VCC_##X, (V))
+
+#define MB93493_VCC_RREDUCT 0x104 /* reduction rate */
+#define MB93493_VCC_RHY 0x108 /* horizontal brightness filter coefficients */
+#define MB93493_VCC_RHC 0x10c /* horizontal colour-difference filter coefficients */
+#define MB93493_VCC_RHSIZE 0x110 /* horizontal cycle sizes */
+#define MB93493_VCC_RHBC 0x114 /* horizontal back porch size */
+#define MB93493_VCC_RVCC 0x118 /* vertical capture period */
+#define MB93493_VCC_RVBC 0x11c /* vertical back porch period */
+#define MB93493_VCC_RV 0x120 /* vertical filter coefficients */
+#define MB93493_VCC_RDTS 0x128 /* DMA transfer size */
+#define MB93493_VCC_RDTS_4B 0x01000000 /* 4-byte transfer */
+#define MB93493_VCC_RDTS_32B 0x03000000 /* 32-byte transfer */
+#define MB93493_VCC_RDTS_SHIFT 24
+#define MB93493_VCC_RCC 0x130 /* VCC control */
+#define MB93493_VCC_RIS 0x134 /* VCC interrupt status */
+
+#define __addr_MB93493_VCC_TPI(X) (__region_CS3 + 0x180 + (X))
+
+#define VCC_RHSIZE_RHCC 0x000007ff
+#define VCC_RHSIZE_RHCC_SHIFT 0
+#define VCC_RHSIZE_RHTCC 0x0fff0000
+#define VCC_RHSIZE_RHTCC_SHIFT 16
+
+#define VCC_RVBC_RVBC 0x00003f00
+#define VCC_RVBC_RVBC_SHIFT 8
+
+#define VCC_RREDUCT_RHR 0x07ff0000
+#define VCC_RREDUCT_RHR_SHIFT 16
+#define VCC_RREDUCT_RVR 0x000007ff
+#define VCC_RREDUCT_RVR_SHIFT 0
+
+#define VCC_RCC_CE 0x00000001 /* VCC enable */
+#define VCC_RCC_CS 0x00000002 /* request video capture start */
+#define VCC_RCC_CPF 0x0000000c /* pixel format */
+#define VCC_RCC_CPF_YCBCR_16 0x00000000 /* - YCbCr 4:2:2 16-bit format */
+#define VCC_RCC_CPF_RGB 0x00000004 /* - RGB 4:4:4 format */
+#define VCC_RCC_CPF_YCBCR_24 0x00000008 /* - YCbCr 4:2:2 24-bit format */
+#define VCC_RCC_CPF_BT656 0x0000000c /* - ITU R-BT.656 format */
+#define VCC_RCC_CPF_SHIFT 2
+#define VCC_RCC_CSR 0x00000080 /* request reset */
+#define VCC_RCC_HSIP 0x00000100 /* HSYNC polarity */
+#define VCC_RCC_HSIP_LOACT 0x00000000 /* - low active */
+#define VCC_RCC_HSIP_HIACT 0x00000100 /* - high active */
+#define VCC_RCC_VSIP 0x00000200 /* VSYNC polarity */
+#define VCC_RCC_VSIP_LOACT 0x00000000 /* - low active */
+#define VCC_RCC_VSIP_HIACT 0x00000200 /* - high active */
+#define VCC_RCC_CIE 0x00000800 /* interrupt enable */
+#define VCC_RCC_CFP 0x00001000 /* RGB pixel packing */
+#define VCC_RCC_CFP_4TO3 0x00000000 /* - pack 4 pixels into 3 words */
+#define VCC_RCC_CFP_1TO1 0x00001000 /* - pack 1 pixel into 1 word */
+#define VCC_RCC_CSM 0x00006000 /* interlace specification */
+#define VCC_RCC_CSM_ONEPASS 0x00002000 /* - non-interlaced */
+#define VCC_RCC_CSM_INTERLACE 0x00004000 /* - interlaced */
+#define VCC_RCC_CSM_SHIFT 13
+#define VCC_RCC_ES 0x00008000 /* capture start polarity */
+#define VCC_RCC_ES_NEG 0x00000000 /* - negative edge */
+#define VCC_RCC_ES_POS 0x00008000 /* - positive edge */
+#define VCC_RCC_IFI 0x00080000 /* interlace field evaluation reverse */
+#define VCC_RCC_FDTS 0x00300000 /* interlace field start */
+#define VCC_RCC_FDTS_3_8 0x00000000 /* - 3/8 of horizontal entire cycle */
+#define VCC_RCC_FDTS_1_4 0x00100000 /* - 1/4 of horizontal entire cycle */
+#define VCC_RCC_FDTS_7_16 0x00200000 /* - 7/16 of horizontal entire cycle */
+#define VCC_RCC_FDTS_SHIFT 20
+#define VCC_RCC_MOV 0x00400000 /* test bit - always set to 1 */
+#define VCC_RCC_STP 0x00800000 /* request video capture stop */
+#define VCC_RCC_TO 0x01000000 /* input during top-field only */
+
+#define VCC_RIS_VSYNC 0x01000000 /* VSYNC interrupt */
+#define VCC_RIS_OV 0x02000000 /* overflow interrupt */
+#define VCC_RIS_BOTTOM 0x08000000 /* interlace bottom field */
+#define VCC_RIS_STARTED 0x10000000 /* capture started */
+
+/*
+ * I2C
+ */
+#define MB93493_I2C_BSR 0x340 /* bus status */
+#define MB93493_I2C_BCR 0x344 /* bus control */
+#define MB93493_I2C_CCR 0x348 /* clock control */
+#define MB93493_I2C_ADR 0x34c /* address */
+#define MB93493_I2C_DTR 0x350 /* data */
+#define MB93493_I2C_BC2R 0x35c /* bus control 2 */
+
+#define __addr_MB93493_I2C(port,X) (__region_CS3 + MB93493_I2C_##X + ((port)*0x20))
+#define __get_MB93493_I2C(port,X) __get_MB93493(MB93493_I2C_##X + ((port)*0x20))
+#define __set_MB93493_I2C(port,X,V) __set_MB93493(MB93493_I2C_##X + ((port)*0x20), (V))
+
+#define I2C_BSR_BB (1 << 7)
+
+/*
+ * audio controller (I2S) registers
+ */
+#define __get_MB93493_I2S(X) __get_MB93493(MB93493_I2S_##X)
+#define __set_MB93493_I2S(X,V) __set_MB93493(MB93493_I2S_##X, (V))
+
+#define MB93493_I2S_ALDR 0x300 /* L-channel data */
+#define MB93493_I2S_ARDR 0x304 /* R-channel data */
+#define MB93493_I2S_APDR 0x308 /* 16-bit packed data */
+#define MB93493_I2S_AISTR 0x310 /* status */
+#define MB93493_I2S_AICR 0x314 /* control */
+
+#define __addr_MB93493_I2S_ALDR(X) (__region_CS3 + MB93493_I2S_ALDR + (X))
+#define __addr_MB93493_I2S_ARDR(X) (__region_CS3 + MB93493_I2S_ARDR + (X))
+#define __addr_MB93493_I2S_APDR(X) (__region_CS3 + MB93493_I2S_APDR + (X))
+#define __addr_MB93493_I2S_ADR(X) (__region_CS3 + 0x320 + (X))
+
+#define I2S_AISTR_OTST 0x00000003 /* status of output data transfer */
+#define I2S_AISTR_OTR 0x00000010 /* output transfer request pending */
+#define I2S_AISTR_OUR 0x00000020 /* output FIFO underrun detected */
+#define I2S_AISTR_OOR 0x00000040 /* output FIFO overrun detected */
+#define I2S_AISTR_ODS 0x00000100 /* output DMA transfer size */
+#define I2S_AISTR_ODE 0x00000400 /* output DMA transfer request enable */
+#define I2S_AISTR_OTRIE 0x00001000 /* output transfer request interrupt enable */
+#define I2S_AISTR_OURIE 0x00002000 /* output FIFO underrun interrupt enable */
+#define I2S_AISTR_OORIE 0x00004000 /* output FIFO overrun interrupt enable */
+#define I2S_AISTR__OUT_MASK 0x00007570
+#define I2S_AISTR_ITST 0x00030000 /* status of input data transfer */
+#define I2S_AISTR_ITST_SHIFT 16
+#define I2S_AISTR_ITR 0x00100000 /* input transfer request pending */
+#define I2S_AISTR_IUR 0x00200000 /* input FIFO underrun detected */
+#define I2S_AISTR_IOR 0x00400000 /* input FIFO overrun detected */
+#define I2S_AISTR_IDS 0x01000000 /* input DMA transfer size */
+#define I2S_AISTR_IDE 0x04000000 /* input DMA transfer request enable */
+#define I2S_AISTR_ITRIE 0x10000000 /* input transfer request interrupt enable */
+#define I2S_AISTR_IURIE 0x20000000 /* input FIFO underrun interrupt enable */
+#define I2S_AISTR_IORIE 0x40000000 /* input FIFO overrun interrupt enable */
+#define I2S_AISTR__IN_MASK 0x75700000
+
+#define I2S_AICR_MI 0x00000001 /* mono input requested */
+#define I2S_AICR_AMI 0x00000002 /* relation between LRCKI/FS1 and SDI */
+#define I2S_AICR_LRI 0x00000004 /* function of LRCKI pin */
+#define I2S_AICR_SDMI 0x00000070 /* format of input audio data */
+#define I2S_AICR_SDMI_SHIFT 4
+#define I2S_AICR_CLI 0x00000080 /* input FIFO clearing control */
+#define I2S_AICR_IM 0x00000300 /* input state control */
+#define I2S_AICR_IM_SHIFT 8
+#define I2S_AICR__IN_MASK 0x000003f7
+#define I2S_AICR_MO 0x00001000 /* mono output requested */
+#define I2S_AICR_AMO 0x00002000 /* relation between LRCKO/FS0 and SDO */
+#define I2S_AICR_AMO_SHIFT 13
+#define I2S_AICR_LRO 0x00004000 /* function of LRCKO pin */
+#define I2S_AICR_SDMO 0x00070000 /* format of output audio data */
+#define I2S_AICR_SDMO_SHIFT 16
+#define I2S_AICR_CLO 0x00080000 /* output FIFO clearing control */
+#define I2S_AICR_OM 0x00100000 /* output state control */
+#define I2S_AICR__OUT_MASK 0x001f7000
+#define I2S_AICR_DIV 0x03000000 /* frequency division rate */
+#define I2S_AICR_DIV_SHIFT 24
+#define I2S_AICR_FL 0x20000000 /* frame length */
+#define I2S_AICR_FS 0x40000000 /* frame sync method */
+#define I2S_AICR_ME 0x80000000 /* master enable */
+
+/*
+ * PCMCIA
+ */
+#define __addr_MB93493_PCMCIA(X) ((volatile unsigned long *)(__region_CS5 + (X)))
+
+/*
+ * GPIO
+ */
+#define __get_MB93493_GPIO_PDR(X) __get_MB93493(0x380 + (X) * 0xc0)
+#define __set_MB93493_GPIO_PDR(X,V) __set_MB93493(0x380 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_GPDR(X) __get_MB93493(0x384 + (X) * 0xc0)
+#define __set_MB93493_GPIO_GPDR(X,V) __set_MB93493(0x384 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_SIR(X) __get_MB93493(0x388 + (X) * 0xc0)
+#define __set_MB93493_GPIO_SIR(X,V) __set_MB93493(0x388 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_SOR(X) __get_MB93493(0x38c + (X) * 0xc0)
+#define __set_MB93493_GPIO_SOR(X,V) __set_MB93493(0x38c + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_PDSR(X) __get_MB93493(0x390 + (X) * 0xc0)
+#define __set_MB93493_GPIO_PDSR(X,V) __set_MB93493(0x390 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_PDCR(X) __get_MB93493(0x394 + (X) * 0xc0)
+#define __set_MB93493_GPIO_PDCR(X,V) __set_MB93493(0x394 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_INTST(X) __get_MB93493(0x398 + (X) * 0xc0)
+#define __set_MB93493_GPIO_INTST(X,V) __set_MB93493(0x398 + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_IEHL(X) __get_MB93493(0x39c + (X) * 0xc0)
+#define __set_MB93493_GPIO_IEHL(X,V) __set_MB93493(0x39c + (X) * 0xc0, (V))
+
+#define __get_MB93493_GPIO_IELH(X) __get_MB93493(0x3a0 + (X) * 0xc0)
+#define __set_MB93493_GPIO_IELH(X,V) __set_MB93493(0x3a0 + (X) * 0xc0, (V))
+
+#endif /* _ASM_MB93493_REGS_H */
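Every register helper above funnels through __get_MB93493()/__set_MB93493(), which dereference the CS3 window and issue mb() on writes. A minimal sketch of driving a GPIO data register on unit 0 (the function name and bit values are hypothetical):

#include <asm/mb93493-regs.h>

static void hypothetical_gpio_set_bits(unsigned long bits)
{
	unsigned long pdr = __get_MB93493_GPIO_PDR(0);

	/* __set_MB93493() issues mb() itself after the store */
	__set_MB93493_GPIO_PDR(0, pdr | bits);
}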
diff --git a/arch/frv/include/asm/mc146818rtc.h b/arch/frv/include/asm/mc146818rtc.h
new file mode 100644
index 00000000000..90dfb7a633d
--- /dev/null
+++ b/arch/frv/include/asm/mc146818rtc.h
@@ -0,0 +1,16 @@
+/* mc146818rtc.h: RTC defs
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+
+#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h
new file mode 100644
index 00000000000..e9a0ec85a40
--- /dev/null
+++ b/arch/frv/include/asm/mem-layout.h
@@ -0,0 +1,86 @@
+/* mem-layout.h: memory layout
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MEM_LAYOUT_H
+#define _ASM_MEM_LAYOUT_H
+
+#ifndef __ASSEMBLY__
+#define __UL(X) ((unsigned long) (X))
+#else
+#define __UL(X) (X)
+#endif
+
+/*
+ * PAGE_SHIFT determines the page size
+ */
+#define PAGE_SHIFT 14
+
+#ifndef __ASSEMBLY__
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#endif
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+/*
+ * the slab must be aligned such that load- and store-double instructions don't
+ * fault if used
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
+
+/*****************************************************************************/
+/*
+ * virtual memory layout from kernel's point of view
+ */
+#define PAGE_OFFSET ((unsigned long) &__page_offset)
+
+#ifdef CONFIG_MMU
+
+/* see Documentation/frv/mmu-layout.txt */
+#define KERNEL_LOWMEM_START __UL(0xc0000000)
+#define KERNEL_LOWMEM_END __UL(0xd0000000)
+#define VMALLOC_START __UL(0xd0000000)
+#define VMALLOC_END __UL(0xd8000000)
+#define PKMAP_BASE __UL(0xd8000000)
+#define PKMAP_END __UL(0xdc000000)
+#define KMAP_ATOMIC_SECONDARY_FRAME __UL(0xdc000000)
+#define KMAP_ATOMIC_PRIMARY_FRAME __UL(0xdd000000)
+
+#endif
+
+#define KERNEL_IO_START __UL(0xe0000000)
+
+
+/*****************************************************************************/
+/*
+ * memory layout from userspace's point of view
+ */
+#define BRK_BASE __UL(2 * 1024 * 1024 + PAGE_SIZE)
+#define STACK_TOP __UL(2 * 1024 * 1024)
+#define STACK_TOP_MAX __UL(0xc0000000)
+
+/* userspace process size */
+#ifdef CONFIG_MMU
+#define TASK_SIZE (PAGE_OFFSET)
+#else
+#define TASK_SIZE __UL(0xFFFFFFFFUL)
+#endif
+
+/* base of area at which unspecified mmaps will start */
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+#define TASK_UNMAPPED_BASE __UL(16 * 1024 * 1024)
+#else
+#define TASK_UNMAPPED_BASE __UL(TASK_SIZE / 3)
+#endif
+
+#endif /* _ASM_MEM_LAYOUT_H */
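With PAGE_SHIFT of 14 the page size is 16KB, so PAGE_MASK rounds addresses down to 16KB boundaries; a tiny illustration (the helper is hypothetical):

#include <asm/mem-layout.h>

/* round an address down to the start of its 16KB page */
static inline unsigned long hypothetical_page_base(unsigned long addr)
{
	return addr & PAGE_MASK;	/* e.g. 0xc0005678 -> 0xc0004000 */
}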
diff --git a/arch/frv/include/asm/mman.h b/arch/frv/include/asm/mman.h
new file mode 100644
index 00000000000..8eebf89f5ab
--- /dev/null
+++ b/arch/frv/include/asm/mman.h
@@ -0,0 +1 @@
+#include <asm-generic/mman.h>
diff --git a/arch/frv/include/asm/mmu.h b/arch/frv/include/asm/mmu.h
new file mode 100644
index 00000000000..86ca0e86e7d
--- /dev/null
+++ b/arch/frv/include/asm/mmu.h
@@ -0,0 +1,41 @@
+/* mmu.h: memory management context for FR-V with or without MMU support
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_MMU_H
+#define _ASM_MMU_H
+
+typedef struct {
+#ifdef CONFIG_MMU
+ struct list_head id_link; /* link in list of context ID owners */
+ unsigned short id; /* MMU context ID */
+ unsigned short id_busy; /* true if ID is in CXNR */
+ unsigned long itlb_cached_pge; /* [SCR0] PGE cached for insn TLB handler */
+ unsigned long itlb_ptd_mapping; /* [DAMR4] PTD mapping for itlb cached PGE */
+ unsigned long dtlb_cached_pge; /* [SCR1] PGE cached for data TLB handler */
+ unsigned long dtlb_ptd_mapping; /* [DAMR5] PTD mapping for dtlb cached PGE */
+
+#else
+ unsigned long end_brk;
+
+#endif
+
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+
+} mm_context_t;
+
+#ifdef CONFIG_MMU
+extern int __nongpreldata cxn_pinned;
+extern int cxn_pin_by_pid(pid_t pid);
+#endif
+
+#endif /* _ASM_MMU_H */
diff --git a/arch/frv/include/asm/mmu_context.h b/arch/frv/include/asm/mmu_context.h
new file mode 100644
index 00000000000..c7daa395156
--- /dev/null
+++ b/arch/frv/include/asm/mmu_context.h
@@ -0,0 +1,50 @@
+/* mmu_context.h: MMU context management routines
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MMU_CONTEXT_H
+#define _ASM_MMU_CONTEXT_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm-generic/mm_hooks.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#ifdef CONFIG_MMU
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *_pgd);
+extern void destroy_context(struct mm_struct *mm);
+
+#else
+#define init_new_context(tsk, mm) ({ 0; })
+#define change_mm_context(old, ctx, _pml4) do {} while(0)
+#define destroy_context(mm) do {} while(0)
+#endif
+
+#define switch_mm(prev, next, tsk) \
+do { \
+ if (prev != next) \
+ change_mm_context(&prev->context, &next->context, next->pgd); \
+} while(0)
+
+#define activate_mm(prev, next) \
+do { \
+ change_mm_context(&prev->context, &next->context, next->pgd); \
+} while(0)
+
+#define deactivate_mm(tsk, mm) \
+do { \
+} while(0)
+
+#endif
diff --git a/arch/frv/include/asm/module.h b/arch/frv/include/asm/module.h
new file mode 100644
index 00000000000..3d5c6360289
--- /dev/null
+++ b/arch/frv/include/asm/module.h
@@ -0,0 +1,28 @@
+/* module.h: FRV module stuff
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_MODULE_H
+#define _ASM_MODULE_H
+
+struct mod_arch_specific
+{
+};
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+/*
+ * Include the architecture version.
+ */
+#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " "
+
+#endif /* _ASM_MODULE_H */
+
diff --git a/arch/frv/include/asm/msgbuf.h b/arch/frv/include/asm/msgbuf.h
new file mode 100644
index 00000000000..97ceb55a06f
--- /dev/null
+++ b/arch/frv/include/asm/msgbuf.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_MSGBUF_H
+#define _ASM_MSGBUF_H
+
+/*
+ * The msqid64_ds structure for FR-V architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _ASM_MSGBUF_H */
+
diff --git a/arch/frv/include/asm/mutex.h b/arch/frv/include/asm/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/arch/frv/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
new file mode 100644
index 00000000000..8c97068ac8f
--- /dev/null
+++ b/arch/frv/include/asm/page.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_PAGE_H
+#define _ASM_PAGE_H
+
+#include <asm/virtconvert.h>
+#include <asm/mem-layout.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_user_page(vto, vfrom, vaddr, topg) memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long ste[64];} pmd_t;
+typedef struct { pmd_t pue[1]; } pud_t;
+typedef struct { pud_t pge[1]; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).ste[0])
+#define pud_val(x) ((x).pue[0])
+#define pgd_val(x) ((x).pge[0])
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+#define PTE_MASK PAGE_MASK
+
+#define devmem_is_allowed(pfn) 1
+
+#define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr))
+#define __va(paddr) phys_to_virt((unsigned long) (paddr))
+
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+extern unsigned long max_pfn;
+
+#ifdef CONFIG_MMU
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#else
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
+
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_PAGE_H */
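The conversion macros above tie kernel virtual addresses, physical addresses and struct page together; a hedged round-trip sketch (the function name is hypothetical):

#include <linux/mm.h>
#include <asm/page.h>

static void hypothetical_page_roundtrip(void)
{
	unsigned long vaddr = __get_free_page(GFP_KERNEL);

	if (vaddr && virt_addr_valid(vaddr)) {
		struct page *page = virt_to_page(vaddr);

		/* pfn_to_kaddr(page_to_pfn(page)) recovers the original vaddr */
		free_page((unsigned long) pfn_to_kaddr(page_to_pfn(page)));
	}
}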
diff --git a/arch/frv/include/asm/param.h b/arch/frv/include/asm/param.h
new file mode 100644
index 00000000000..a52dca9a956
--- /dev/null
+++ b/arch/frv/include/asm/param.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_PARAM_H
+#define _ASM_PARAM_H
+
+#define EXEC_PAGESIZE 16384
+
+#include <asm-generic/param.h>
+
+#endif /* _ASM_PARAM_H */
diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h
new file mode 100644
index 00000000000..ef03baf5d89
--- /dev/null
+++ b/arch/frv/include/asm/pci.h
@@ -0,0 +1,63 @@
+/* pci.h: FR-V specific PCI declarations
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from include/asm-m68k/pci.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_FRV_PCI_H
+#define _ASM_FRV_PCI_H
+
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+#include <asm-generic/pci-dma-compat.h>
+#include <asm-generic/pci.h>
+
+struct pci_dev;
+
+#define pcibios_assign_all_busses() 0
+
+extern void pcibios_set_master(struct pci_dev *dev);
+
+extern void pcibios_penalize_isa_irq(int irq);
+
+#ifdef CONFIG_MMU
+extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
+extern void consistent_free(void *vaddr);
+extern void consistent_sync(void *vaddr, size_t size, int direction);
+extern void consistent_sync_page(struct page *page, unsigned long offset,
+ size_t size, int direction);
+#endif
+
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle);
+
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+/* Return the index of the PCI controller for device PDEV. */
+#define pci_controller_num(PDEV) (0)
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+/*
+ * These are pretty much arbitrary with the CoMEM implementation.
+ * We have the whole address space to ourselves.
+ */
+#define PCIBIOS_MIN_IO 0x100
+#define PCIBIOS_MIN_MEM 0x00010000
+
+#endif /* _ASM_FRV_PCI_H */
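pci_alloc_consistent()/pci_free_consistent() declared above follow the usual consistent-DMA pattern; a short sketch for a hypothetical device (buffer size and names are arbitrary):

#include <linux/pci.h>
#include <asm/pci.h>

static void *hypothetical_alloc_ring(struct pci_dev *pdev, dma_addr_t *dma)
{
	/* 4KB of coherent memory; the caller keeps *dma for programming the device */
	return pci_alloc_consistent(pdev, 4096, dma);
}

static void hypothetical_free_ring(struct pci_dev *pdev, void *ring, dma_addr_t dma)
{
	pci_free_consistent(pdev, 4096, ring, dma);
}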
diff --git a/arch/frv/include/asm/percpu.h b/arch/frv/include/asm/percpu.h
new file mode 100644
index 00000000000..2cad3f874de
--- /dev/null
+++ b/arch/frv/include/asm/percpu.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
diff --git a/arch/frv/include/asm/perf_event.h b/arch/frv/include/asm/perf_event.h
new file mode 100644
index 00000000000..a69e0155d14
--- /dev/null
+++ b/arch/frv/include/asm/perf_event.h
@@ -0,0 +1,17 @@
+/* FRV performance event support
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/frv/include/asm/pgalloc.h b/arch/frv/include/asm/pgalloc.h
new file mode 100644
index 00000000000..416d19a632f
--- /dev/null
+++ b/arch/frv/include/asm/pgalloc.h
@@ -0,0 +1,69 @@
+/* pgalloc.h: Page allocation routines for FRV
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from:
+ * include/asm-m68knommu/pgalloc.h
+ * include/asm-i386/pgalloc.h
+ */
+#ifndef _ASM_PGALLOC_H
+#define _ASM_PGALLOC_H
+
+#include <asm/setup.h>
+#include <asm/virtconvert.h>
+
+#ifdef CONFIG_MMU
+
+#define pmd_populate_kernel(mm, pmd, pte) __set_pmd(pmd, __pa(pte) | _PAGE_TABLE)
+#define pmd_populate(MM, PMD, PAGE) \
+do { \
+ __set_pmd((PMD), page_to_pfn(PAGE) << PAGE_SHIFT | _PAGE_TABLE); \
+} while(0)
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+/*
+ * Allocate and free page tables.
+ */
+
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb,pte,address) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb),(pte)); \
+} while (0)
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ * (In the PAE case we free the pmds as part of the pgd.)
+ */
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb,x,a) do { } while (0)
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_PGALLOC_H */
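The allocation hooks above are normally called only from the core mm code; purely as an illustration of the declared signatures (the wrapper is hypothetical and not something a driver would do):

#include <linux/mm.h>
#include <asm/pgalloc.h>

/* hypothetical: allocate one kernel page-table page and release it again */
static void hypothetical_pte_cycle(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, addr);

	if (pte)
		pte_free_kernel(mm, pte);
}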
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
new file mode 100644
index 00000000000..6bc241e4b4f
--- /dev/null
+++ b/arch/frv/include/asm/pgtable.h
@@ -0,0 +1,544 @@
+/* pgtable.h: FR-V page table mangling
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from:
+ * include/asm-m68knommu/pgtable.h
+ * include/asm-i386/pgtable.h
+ */
+
+#ifndef _ASM_PGTABLE_H
+#define _ASM_PGTABLE_H
+
+#include <asm/mem-layout.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+struct vm_area_struct;
+#endif
+
+#ifndef __ASSEMBLY__
+#if defined(CONFIG_HIGHPTE)
+typedef unsigned long pte_addr_t;
+#else
+typedef pte_t *pte_addr_t;
+#endif
+#endif
+
+/*****************************************************************************/
+/*
+ * MMU-less operation case first
+ */
+#ifndef CONFIG_MMU
+
+#define pgd_present(pgd) (1) /* pages are always present on NO_MM */
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+#define pmd_offset(a, b) ((void *) 0)
+
+#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */
+#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */
+
+#define __swp_type(x) (0)
+#define __swp_offset(x) (0)
+#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#ifndef __ASSEMBLY__
+static inline int pte_file(pte_t pte) { return 0; }
+#endif
+
+#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
+
+#define swapper_pg_dir ((pgd_t *) NULL)
+
+#define pgtable_cache_init() do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
+#else /* !CONFIG_MMU */
+/*****************************************************************************/
+/*
+ * then MMU operation
+ */
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#ifndef __ASSEMBLY__
+extern unsigned long empty_zero_page;
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
+#endif
+
+/*
+ * we use 2-level page tables, folding the PMD (mid-level table) into the PGE (top-level entry)
+ * [see Documentation/frv/mmu-layout.txt]
+ *
+ * Page Directory:
+ * - Size: 16KB
+ * - 64 PGEs per PGD
+ * - Each PGE holds 1 PUD and covers 64MB
+ *
+ * Page Upper Directory:
+ * - Size: 256B
+ * - 1 PUE per PUD
+ * - Each PUE holds 1 PMD and covers 64MB
+ *
+ * Page Mid-Level Directory
+ * - Size: 256B
+ * - 1 PME per PMD
+ * - Each PME holds 64 STEs, all of which point to separate chunks of the same Page Table
+ * - All STEs are instantiated at the same time
+ *
+ * Page Table
+ * - Size: 16KB
+ * - 4096 PTEs per PT
+ * - Each Linux PT is subdivided into 64 FR451 PT's, each of which holds 64 entries
+ *
+ * Pages
+ * - Size: 4KB
+ *
+ * total PTEs
+ *    = 1 PML4E * 64 PGEs * 1 PUE * 1 PME * 4096 PTEs
+ * = 1 PML4E * 64 PGEs * 64 STEs * 64 PTEs/FR451-PT
+ * = 262144 (or 256 * 1024)
+ */
+#define PGDIR_SHIFT 26
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+#define PTRS_PER_PGD 64
+
+#define PUD_SHIFT 26
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PUE_SIZE 256
+
+#define PMD_SHIFT 26
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PTRS_PER_PMD 1
+#define PME_SIZE 256
+
+#define __frv_PT_SIZE 256
+
+#define PTRS_PER_PTE 4096
+
+#define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)
+
+#define TWOLEVEL_PGDIR_SHIFT 26
+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+#define BOOT_KERNEL_PGD_PTRS (PTRS_PER_PGD - BOOT_USER_PGD_PTRS)
+
+#ifndef __ASSEMBLY__
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte)
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e)))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e))))
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) \
+do { \
+ *(pteptr) = (pteval); \
+ asm volatile("dcf %M0" :: "U"(*pteptr)); \
+} while(0)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+
+#define pgd_populate(mm, pgd, pud) do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) \
+do { \
+ memcpy((pgdptr), &(pgdval), sizeof(pgd_t)); \
+ asm volatile("dcf %M0" :: "U"(*(pgdptr))); \
+} while(0)
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *) pgd;
+}
+
+#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x, address) do { } while (0)
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
+static inline int pud_present(pud_t pud) { return 1; }
+static inline void pud_clear(pud_t *pud) { }
+
+#define pud_populate(mm, pmd, pte) do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+#define pud_page(pud) (pmd_page((pmd_t){ pud }))
+#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+
+/*
+ * (pmds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd);
+
+#define set_pmd(pmdptr, pmdval) \
+do { \
+ __set_pmd((pmdptr), (pmdval).ste[0]); \
+} while(0)
+
+#define __pmd_index(address) 0
+
+static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
+{
+ return (pmd_t *) dir + __pmd_index(address);
+}
+
+#define pte_same(a, b) ((a).pte == (b).pte)
+#define pte_page(x) (mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
+#define pte_none(x) (!(x).pte)
+#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define VMALLOC_VMADDR(x) ((unsigned long) (x))
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * control flags in AMPR registers and TLB entries
+ */
+#define _PAGE_BIT_PRESENT xAMPRx_V_BIT
+#define _PAGE_BIT_WP DAMPRx_WP_BIT
+#define _PAGE_BIT_NOCACHE xAMPRx_C_BIT
+#define _PAGE_BIT_SUPER xAMPRx_S_BIT
+#define _PAGE_BIT_ACCESSED xAMPRx_RESERVED8_BIT
+#define _PAGE_BIT_DIRTY xAMPRx_M_BIT
+#define _PAGE_BIT_NOTGLOBAL xAMPRx_NG_BIT
+
+#define _PAGE_PRESENT xAMPRx_V
+#define _PAGE_WP DAMPRx_WP
+#define _PAGE_NOCACHE xAMPRx_C
+#define _PAGE_SUPER xAMPRx_S
+#define _PAGE_ACCESSED xAMPRx_RESERVED8 /* accessed if set */
+#define _PAGE_DIRTY xAMPRx_M
+#define _PAGE_NOTGLOBAL xAMPRx_NG
+
+#define _PAGE_RESERVED_MASK (xAMPRx_RESERVED8 | xAMPRx_RESERVED13)
+
+#define _PAGE_FILE 0x002 /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE 0x000 /* If not present */
+
+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define __PGPROT_BASE \
+ (_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(__PGPROT_BASE)
+#define PAGE_COPY __pgprot(__PGPROT_BASE | _PAGE_WP)
+#define PAGE_READONLY __pgprot(__PGPROT_BASE | _PAGE_WP)
+
+#define __PAGE_KERNEL (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY)
+#define __PAGE_KERNEL_NOCACHE (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE)
+#define __PAGE_KERNEL_RO (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP)
+
+#define MAKE_GLOBAL(x) __pgprot((x) & ~_PAGE_NOTGLOBAL)
+
+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | xAMPRx_SS_16Kb)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The FR451 can do execute protection by virtue of having separate TLB miss handlers for
+ * instruction access and for data access. However, we don't have enough reserved bits to say
+ * "execute only", so we don't bother. If you can read it, you can execute it and vice versa.
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED
+
+/*
+ * Define this to warn about kernel memory accesses that are
+ * done without an 'access_ok(VERIFY_WRITE,..)'
+ */
+#undef TEST_ACCESS_OK
+
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_bad(x) (pmd_val(x) & xAMPRx_SS)
+#define pmd_clear(xp) do { __set_pmd(xp, 0); } while(0)
+
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+#ifndef CONFIG_DISCONTIGMEM
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#endif
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); }
+static inline int pte_special(pte_t pte) { return 0; }
+
+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+ int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+ return i;
+}
+
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ unsigned long x = xchg(&ptep->pte, 0);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+ return __pte(x);
+}
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ set_bit(_PAGE_BIT_WP, ptep);
+ asm volatile("dcf %M0" :: "U"(*ptep));
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable"
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+/* This takes a physical page address that is used by the remapping functions */
+#define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte &= _PAGE_CHG_MASK;
+ pte.pte |= pgprot_val(newprot);
+ return pte;
+}
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#define pgd_index_k(addr) pgd_index(addr)
+
+/* Find an entry in the bottom-level page table.. */
+#define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) kunmap_atomic(pte)
+#else
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+#define pte_unmap(pte) do { } while (0)
+#endif
+
+/*
+ * Handle swap and file entries
+ * - the PTE is encoded in the following format:
+ * bit 0: Must be 0 (!_PAGE_PRESENT)
+ * bit 1: Type: 0 for swap, 1 for file (_PAGE_FILE)
+ * bits 2-7: Swap type
+ * bits 8-31: Swap offset
+ * bits 2-31: File pgoff
+ */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((x).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
+#define __pte_to_swp_entry(_pte) ((swp_entry_t) { (_pte).pte })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline int pte_file(pte_t pte)
+{
+ return pte.pte & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS 29
+
+#define pte_to_pgoff(PTE) ((PTE).pte >> 2)
+#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTE_SAME
+#include <asm-generic/pgtable.h>
+
+/*
+ * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ struct mm_struct *mm;
+ unsigned long ampr;
+
+ mm = current->mm;
+ if (mm) {
+ pgd_t *pge = pgd_offset(mm, address);
+ pud_t *pue = pud_offset(pge, address);
+ pmd_t *pme = pmd_offset(pue, address);
+
+ ampr = pme->ste[0] & 0xffffff00;
+ ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
+ xAMPRx_V;
+ } else {
+ address = ULONG_MAX;
+ ampr = 0;
+ }
+
+ asm volatile("movgs %0,scr0\n"
+ "movgs %0,scr1\n"
+ "movgs %1,dampr4\n"
+ "movgs %1,dampr5\n"
+ :
+ : "r"(address), "r"(ampr)
+ );
+}
+
+#ifdef CONFIG_PROC_FS
+extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer);
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+extern void __init paging_init(void);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_H */
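
As a standalone illustration of the layout described above (PGDIR_SHIFT 26, PTRS_PER_PGD 64, PTRS_PER_PTE 4096, 4KB pages), the following userspace sketch mirrors the index and swap-entry arithmetic of this header. It is not kernel code; it only restates the constants and bit layouts shown in the diff.

/* Standalone sketch mirroring the FR-V pgtable constants above; compiled in
 * userspace for illustration only. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4KB pages */
#define PGDIR_SHIFT	26	/* each PGD entry covers 64MB */
#define PTRS_PER_PGD	64
#define PTRS_PER_PTE	4096

/* same arithmetic as pgd_index()/pte_index() above */
static unsigned long pgd_index(unsigned long addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

static unsigned long pte_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

/* same bit layout as __swp_entry()/__swp_type()/__swp_offset() above */
static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 2) | (offset << 8);
}

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* arbitrary example address */
	unsigned long swp = swp_entry(3, 0x1234);

	printf("addr %#lx -> pgd slot %lu, pte slot %lu, page offset %#lx\n",
	       addr, pgd_index(addr), pte_index(addr),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	printf("swap entry %#lx -> type %lu, offset %#lx\n",
	       swp, (swp >> 2) & 0x1f, swp >> 8);
	return 0;
}
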
diff --git a/arch/frv/include/asm/poll.h b/arch/frv/include/asm/poll.h
new file mode 100644
index 00000000000..0d01479ccc5
--- /dev/null
+++ b/arch/frv/include/asm/poll.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_POLL_H
+#define _ASM_POLL_H
+
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 256
+
+#include <asm-generic/poll.h>
+
+#undef POLLREMOVE
+
+#endif
+
diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h
new file mode 100644
index 00000000000..a9f1f5be063
--- /dev/null
+++ b/arch/frv/include/asm/posix_types.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_POSIX_TYPES_H
+#define _ASM_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned short __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned short __kernel_uid_t;
+typedef unsigned short __kernel_gid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+typedef unsigned short __kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#if defined(__KERNEL__)
+
+#undef __FD_SET
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+
+#undef __FD_CLR
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+
+#undef __FD_ISSET
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
+
+#undef __FD_ZERO
+#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
+
+#endif /* defined(__KERNEL__) */
+
+#endif
+
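
The __FD_* helpers above depend on __FDELT() and __FDMASK(), which are presumably supplied by the generic linux/posix_types.h rather than this file. The sketch below assumes their conventional definitions (word index and bit mask per descriptor) purely to illustrate the bit manipulation; the struct name and bounds are illustrative, not this header's API.

/* Userspace sketch of the fd_set bit manipulation above. __NFDBITS,
 * __FDELT() and __FDMASK() are assumed to have their usual generic
 * definitions; they are not part of the header shown here. */
#include <stdio.h>

#define __NFDBITS	(8 * (int)sizeof(unsigned long))
#define __FDELT(d)	((d) / __NFDBITS)		/* which word */
#define __FDMASK(d)	(1UL << ((d) % __NFDBITS))	/* which bit  */

struct my_fd_set {
	unsigned long fds_bits[1024 / __NFDBITS];	/* hypothetical 1024-fd set */
};

int main(void)
{
	struct my_fd_set set = { { 0 } };
	int fd = 37;

	set.fds_bits[__FDELT(fd)] |= __FDMASK(fd);	/* __FD_SET   */
	printf("fd %d set? %d\n", fd,
	       !!(set.fds_bits[__FDELT(fd)] & __FDMASK(fd)));	/* __FD_ISSET */
	return 0;
}
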
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
new file mode 100644
index 00000000000..81c2e271d62
--- /dev/null
+++ b/arch/frv/include/asm/processor.h
@@ -0,0 +1,152 @@
+/* processor.h: FRV processor definitions
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROCESSOR_H
+#define _ASM_PROCESSOR_H
+
+#include <asm/mem-layout.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/sections.h>
+#include <asm/segment.h>
+#include <asm/fpu.h>
+#include <asm/registers.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/cache.h>
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ */
+struct cpuinfo_frv {
+#ifdef CONFIG_MMU
+ unsigned long *pgd_quick;
+ unsigned long *pte_quick;
+ unsigned long pgtable_cache_sz;
+#endif
+} __cacheline_aligned;
+
+extern struct cpuinfo_frv __nongprelbss boot_cpu_data;
+
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+
+/*
+ * Bus types
+ */
+#define EISA_bus 0
+#define MCA_bus 0
+
+struct thread_struct {
+ struct pt_regs *frame; /* [GR28] exception frame ptr for this thread */
+ struct task_struct *curr; /* [GR29] current pointer for this thread */
+ unsigned long sp; /* [GR1 ] kernel stack pointer */
+ unsigned long fp; /* [GR2 ] kernel frame pointer */
+ unsigned long lr; /* link register */
+ unsigned long pc; /* program counter */
+ unsigned long gr[12]; /* [GR16-GR27] */
+ unsigned long sched_lr; /* LR from schedule() */
+
+ union {
+ struct pt_regs *frame0; /* top (user) stack frame */
+ struct user_context *user; /* userspace context */
+ };
+} __attribute__((aligned(8)));
+
+extern struct pt_regs *__kernel_frame0_ptr;
+extern struct task_struct *__kernel_current_task;
+
+#endif
+
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_FRAME0 \
+ ((struct pt_regs *) \
+ (sizeof(init_stack) + (unsigned long) init_stack - sizeof(struct user_context)))
+
+#define INIT_THREAD { \
+ NULL, \
+ (struct task_struct *) init_stack, \
+ 0, 0, 0, 0, \
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
+ 0, \
+ { INIT_THREAD_FRAME0 }, \
+}
+
+/*
+ * do necessary setup to start up a newly executed thread.
+ * - need to discard the frame stacked by init() invoking the execve syscall
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ __frame = __kernel_frame0_ptr; \
+ __frame->pc = (_pc); \
+ __frame->psr &= ~PSR_S; \
+ __frame->sp = (_usp); \
+} while(0)
+
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern asmlinkage int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+extern asmlinkage void save_user_regs(struct user_context *target);
+extern asmlinkage void *restore_user_regs(const struct user_context *target, ...);
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+#define forget_segments() do { } while (0)
+
+/*
+ * Free current thread data structures etc..
+ */
+static inline void exit_thread(void)
+{
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
+#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
+
+/* Allocation and freeing of basic task resources. */
+extern struct task_struct *alloc_task_struct_node(int node);
+extern void free_task_struct(struct task_struct *p);
+
+#define cpu_relax() barrier()
+
+/* data cache prefetch */
+#define ARCH_HAS_PREFETCH
+static inline void prefetch(const void *x)
+{
+ asm volatile("dcpl %0,gr0,#0" : : "r"(x));
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_PROCESSOR_H */
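
current_text_addr() above leans on two GNU C extensions (local labels and label-as-value). A minimal userspace demonstration of the same trick, for illustration only:

/* Minimal userspace demo of the label-as-value trick used by
 * current_text_addr() above; requires GNU C extensions (GCC). */
#include <stdio.h>

#define current_text_addr() ({ __label__ _l; _l: &&_l; })

int main(void)
{
	void *here = current_text_addr();	/* address of the _l label, i.e. "here" */

	printf("currently executing near %p\n", here);
	return 0;
}
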
diff --git a/arch/frv/include/asm/ptrace.h b/arch/frv/include/asm/ptrace.h
new file mode 100644
index 00000000000..ef6635ca4ec
--- /dev/null
+++ b/arch/frv/include/asm/ptrace.h
@@ -0,0 +1,89 @@
+/* ptrace.h: ptrace() relevant definitions
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PTRACE_H
+#define _ASM_PTRACE_H
+
+#include <asm/registers.h>
+#ifdef __KERNEL__
+#include <asm/irq_regs.h>
+
+#define in_syscall(regs) (((regs)->tbr & TBR_TT) == TBR_TT_TRAP0)
+#endif
+
+
+#define PT_PSR 0
+#define PT_ISR 1
+#define PT_CCR 2
+#define PT_CCCR 3
+#define PT_LR 4
+#define PT_LCR 5
+#define PT_PC 6
+
+#define PT__STATUS 7 /* exception status */
+#define PT_SYSCALLNO 8 /* syscall number or -1 */
+#define PT_ORIG_GR8 9 /* saved GR8 for signal handling */
+#define PT_GNER0 10
+#define PT_GNER1 11
+#define PT_IACC0H 12
+#define PT_IACC0L 13
+
+#define PT_GR(j) ( 14 + (j)) /* GRj for 0<=j<=63 */
+#define PT_FR(j) ( 78 + (j)) /* FRj for 0<=j<=63 */
+#define PT_FNER(j) (142 + (j)) /* FNERj for 0<=j<=1 */
+#define PT_MSR(j) (144 + (j)) /* MSRj for 0<=j<=2 */
+#define PT_ACC(j) (146 + (j)) /* ACCj for 0<=j<=7 */
+#define PT_ACCG(jklm) (154 + (jklm)) /* ACCGjklm for 0<=jklm<=1 (reads four regs per slot) */
+#define PT_FSR(j) (156 + (j)) /* FSRj for 0<=j<=0 */
+#define PT__GPEND 78
+#define PT__END 157
+
+#define PT_TBR PT_GR(0)
+#define PT_SP PT_GR(1)
+#define PT_FP PT_GR(2)
+#define PT_PREV_FRAME PT_GR(28) /* previous exception frame pointer (old gr28 value) */
+#define PT_CURR_TASK PT_GR(29) /* current task */
+
+
+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */
+
+#define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */
+#define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * we dedicate GR28 to keeping a pointer to the current exception frame
+ * - gr28 is destroyed on entry to the kernel from userspace
+ */
+register struct pt_regs *__frame asm("gr28");
+
+#define user_mode(regs) (!((regs)->psr & PSR_S))
+#define instruction_pointer(regs) ((regs)->pc)
+#define user_stack_pointer(regs) ((regs)->sp)
+
+extern unsigned long user_stack(const struct pt_regs *);
+#define profile_pc(regs) ((regs)->pc)
+
+#define task_pt_regs(task) ((task)->thread.frame0)
+
+#define arch_has_single_step() (1)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_PTRACE_H */
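
in_syscall() above classifies an exception frame by the trap-type field of TBR. The standalone sketch below replays that test using the TBR_TT and TBR_TT_TRAP0 values that appear in asm/spr-regs.h later in this patch; it is illustration only and reads no real register.

/* Userspace sketch of the in_syscall() test above: the TBR trap-type field
 * (TBR_TT, bits 4-11) is compared against TBR_TT_TRAP0, the trap vector the
 * kernel treats as a system-call entry. */
#include <stdio.h>

#define TBR_TT		0x00000ff0
#define TBR_TT_TRAP0	(0x80 << 4)

static int in_syscall(unsigned long tbr)
{
	return (tbr & TBR_TT) == TBR_TT_TRAP0;
}

int main(void)
{
	unsigned long syscall_tbr = 0x800;	/* TBR_TT field == TBR_TT_TRAP0 */
	unsigned long irq_tbr     = 0x210;	/* TBR_TT field == an external interrupt */

	printf("tbr=%#lx -> in_syscall=%d\n", syscall_tbr, in_syscall(syscall_tbr));
	printf("tbr=%#lx -> in_syscall=%d\n", irq_tbr, in_syscall(irq_tbr));
	return 0;
}
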
diff --git a/arch/frv/include/asm/registers.h b/arch/frv/include/asm/registers.h
new file mode 100644
index 00000000000..9666119fcf6
--- /dev/null
+++ b/arch/frv/include/asm/registers.h
@@ -0,0 +1,232 @@
+/* registers.h: register frame declarations
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * notes:
+ *
+ * (1) the members of all these structures are carefully aligned to permit
+ *     the use of STD/STDF instructions
+ *
+ * (2) if you change these structures, you must change the code in
+ * arch/frvnommu/kernel/{break.S,entry.S,switch_to.S,gdb-stub.c}
+ *
+ *
+ * the kernel stack space block looks like this:
+ *
+ * +0x2000 +----------------------
+ * | union {
+ * | struct frv_frame0 {
+ * | struct user_context {
+ * | struct user_int_regs
+ * | struct user_fpmedia_regs
+ * | }
+ * | struct frv_debug_regs
+ * | }
+ * | struct pt_regs [user exception]
+ * | }
+ * +---------------------- <-- __kernel_frame0_ptr (maybe GR28)
+ * |
+ * | kernel stack
+ * |
+ * |......................
+ * | struct pt_regs [kernel exception]
+ * |...................... <-- __kernel_frame0_ptr (maybe GR28)
+ * |
+ * | kernel stack
+ * |
+ * |...................... <-- stack pointer (GR1)
+ * |
+ * | unused stack space
+ * |
+ * +----------------------
+ * | struct thread_info
+ * +0x0000 +---------------------- <-- __current_thread_info (GR15);
+ *
+ * note that GR28 points to the current exception frame
+ */
+
+#ifndef _ASM_REGISTERS_H
+#define _ASM_REGISTERS_H
+
+#ifndef __ASSEMBLY__
+#define __OFFSET(X,N) ((X)+(N)*4)
+#define __OFFSETC(X,N) xxxxxxxxxxxxxxxxxxxxxxxx
+#else
+#define __OFFSET(X,N) ((X)+(N)*4)
+#define __OFFSETC(X,N) ((X)+(N))
+#endif
+
+/*****************************************************************************/
+/*
+ * Exception/Interrupt frame
+ * - held on kernel stack
+ * - 8-byte aligned on stack (old SP is saved in frame)
+ * - GR0 is fixed 0, so we don't save it
+ */
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long psr; /* Processor Status Register */
+ unsigned long isr; /* Integer Status Register */
+ unsigned long ccr; /* Condition Code Register */
+ unsigned long cccr; /* Condition Code for Conditional Insns Register */
+ unsigned long lr; /* Link Register */
+ unsigned long lcr; /* Loop Count Register */
+ unsigned long pc; /* Program Counter Register */
+ unsigned long __status; /* exception status */
+ unsigned long syscallno; /* syscall number or -1 */
+ unsigned long orig_gr8; /* original syscall arg #1 */
+ unsigned long gner0;
+ unsigned long gner1;
+ unsigned long long iacc0;
+ unsigned long tbr; /* GR0 is fixed zero, so we use this for TBR */
+ unsigned long sp; /* GR1: USP/KSP */
+ unsigned long fp; /* GR2: FP */
+ unsigned long gr3;
+ unsigned long gr4;
+ unsigned long gr5;
+ unsigned long gr6;
+ unsigned long gr7; /* syscall number */
+ unsigned long gr8; /* 1st syscall param; syscall return */
+ unsigned long gr9; /* 2nd syscall param */
+ unsigned long gr10; /* 3rd syscall param */
+ unsigned long gr11; /* 4th syscall param */
+ unsigned long gr12; /* 5th syscall param */
+ unsigned long gr13; /* 6th syscall param */
+ unsigned long gr14;
+ unsigned long gr15;
+ unsigned long gr16; /* GP pointer */
+ unsigned long gr17; /* small data */
+ unsigned long gr18; /* PIC/PID */
+ unsigned long gr19;
+ unsigned long gr20;
+ unsigned long gr21;
+ unsigned long gr22;
+ unsigned long gr23;
+ unsigned long gr24;
+ unsigned long gr25;
+ unsigned long gr26;
+ unsigned long gr27;
+ struct pt_regs *next_frame; /* GR28 - next exception frame */
+ unsigned long gr29; /* GR29 - OS reserved */
+ unsigned long gr30; /* GR30 - OS reserved */
+ unsigned long gr31; /* GR31 - OS reserved */
+} __attribute__((aligned(8)));
+
+#endif
+
+#define REG__STATUS_STEP 0x00000001 /* - reenable single stepping on return */
+#define REG__STATUS_STEPPED 0x00000002 /* - single step caused exception */
+#define REG__STATUS_BROKE 0x00000004 /* - BREAK insn caused exception */
+#define REG__STATUS_SYSC_ENTRY 0x40000000 /* - T on syscall entry (ptrace.c only) */
+#define REG__STATUS_SYSC_EXIT 0x80000000 /* - T on syscall exit (ptrace.c only) */
+
+#define REG_GR(R) __OFFSET(REG_GR0, (R))
+
+#define REG_SP REG_GR(1)
+#define REG_FP REG_GR(2)
+#define REG_PREV_FRAME REG_GR(28) /* previous exception frame pointer (old gr28 value) */
+#define REG_CURR_TASK REG_GR(29) /* current task */
+
+/*****************************************************************************/
+/*
+ * debugging registers
+ */
+#ifndef __ASSEMBLY__
+
+struct frv_debug_regs
+{
+ unsigned long dcr;
+ unsigned long ibar[4] __attribute__((aligned(8)));
+ unsigned long dbar[4] __attribute__((aligned(8)));
+ unsigned long dbdr[4][4] __attribute__((aligned(8)));
+ unsigned long dbmr[4][4] __attribute__((aligned(8)));
+} __attribute__((aligned(8)));
+
+#endif
+
+/*****************************************************************************/
+/*
+ * userspace registers
+ */
+#ifndef __ASSEMBLY__
+
+struct user_int_regs
+{
+ /* integer registers
+ * - up to gr[31] mirror pt_regs
+	 * - total size must be a multiple of 8 bytes
+ */
+ unsigned long psr; /* Processor Status Register */
+ unsigned long isr; /* Integer Status Register */
+ unsigned long ccr; /* Condition Code Register */
+ unsigned long cccr; /* Condition Code for Conditional Insns Register */
+ unsigned long lr; /* Link Register */
+ unsigned long lcr; /* Loop Count Register */
+ unsigned long pc; /* Program Counter Register */
+ unsigned long __status; /* exception status */
+ unsigned long syscallno; /* syscall number or -1 */
+ unsigned long orig_gr8; /* original syscall arg #1 */
+ unsigned long gner[2];
+ unsigned long long iacc[1];
+
+ union {
+ unsigned long tbr;
+ unsigned long gr[64];
+ };
+};
+
+struct user_fpmedia_regs
+{
+ /* FP/Media registers */
+ unsigned long fr[64];
+ unsigned long fner[2];
+ unsigned long msr[2];
+ unsigned long acc[8];
+ unsigned char accg[8];
+ unsigned long fsr[1];
+};
+
+struct user_context
+{
+ struct user_int_regs i;
+ struct user_fpmedia_regs f;
+
+ /* we provide a context extension so that we can save the regs for CPUs that
+ * implement many more of Fujitsu's lavish register spec
+ */
+ void *extension;
+} __attribute__((aligned(8)));
+
+struct frv_frame0 {
+ union {
+ struct pt_regs regs;
+ struct user_context uc;
+ };
+
+ struct frv_debug_regs debug;
+
+} __attribute__((aligned(32)));
+
+#endif
+
+#define __INT_GR(R) __OFFSET(__INT_GR0, (R))
+
+#define __FPMEDIA_FR(R) __OFFSET(__FPMEDIA_FR0, (R))
+#define __FPMEDIA_FNER(R) __OFFSET(__FPMEDIA_FNER0, (R))
+#define __FPMEDIA_MSR(R) __OFFSET(__FPMEDIA_MSR0, (R))
+#define __FPMEDIA_ACC(R) __OFFSET(__FPMEDIA_ACC0, (R))
+#define __FPMEDIA_ACCG(R) __OFFSETC(__FPMEDIA_ACCG0, (R))
+#define __FPMEDIA_FSR(R) __OFFSET(__FPMEDIA_FSR0, (R))
+
+#define __THREAD_GR(R) __OFFSET(__THREAD_GR16, (R) - 16)
+
+#endif /* _ASM_REGISTERS_H */
diff --git a/arch/frv/include/asm/resource.h b/arch/frv/include/asm/resource.h
new file mode 100644
index 00000000000..5fc60548fd0
--- /dev/null
+++ b/arch/frv/include/asm/resource.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_RESOURCE_H
+#define _ASM_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif /* _ASM_RESOURCE_H */
+
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
new file mode 100644
index 00000000000..0e5eb301846
--- /dev/null
+++ b/arch/frv/include/asm/scatterlist.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SCATTERLIST_H
+#define _ASM_SCATTERLIST_H
+
+#include <asm-generic/scatterlist.h>
+
+#endif /* !_ASM_SCATTERLIST_H */
diff --git a/arch/frv/include/asm/sections.h b/arch/frv/include/asm/sections.h
new file mode 100644
index 00000000000..17d0fb171bb
--- /dev/null
+++ b/arch/frv/include/asm/sections.h
@@ -0,0 +1,46 @@
+/* sections.h: linkage layout variables
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SECTIONS_H
+#define _ASM_SECTIONS_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm-generic/sections.h>
+
+#ifdef __KERNEL__
+
+/*
+ * we don't want to put variables in the GP-REL section if they're not used very much - that would
+ * be a waste, since GP-REL addressing is limited to GP16+/-2048
+ */
+#define __nongpreldata __attribute__((section(".data")))
+#define __nongprelbss __attribute__((section(".bss")))
+
+/*
+ * linker symbols
+ */
+extern const void __kernel_image_start, __kernel_image_end, __page_offset;
+
+extern unsigned long __nongprelbss memory_start;
+extern unsigned long __nongprelbss memory_end;
+extern unsigned long __nongprelbss rom_length;
+
+/* determine if we're running from ROM */
+static inline int is_in_rom(unsigned long addr)
+{
+ return 0; /* default case: not in ROM */
+}
+
+#endif
+#endif
+#endif /* _ASM_SECTIONS_H */
diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h
new file mode 100644
index 00000000000..a2320a4a004
--- /dev/null
+++ b/arch/frv/include/asm/segment.h
@@ -0,0 +1,45 @@
+/* segment.h: MMU segment settings
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#ifdef CONFIG_MMU
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#define KERNEL_DS MAKE_MM_SEG(0xdfffffffUL)
+#else
+#define USER_DS MAKE_MM_SEG(memory_end)
+#define KERNEL_DS MAKE_MM_SEG(0xe0000000UL)
+#endif
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (__current_thread_info->addr_limit)
+#define segment_eq(a,b) ((a).seg == (b).seg)
+#define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS)
+#define get_addr_limit() (get_fs().seg)
+
+#define set_fs(_x) \
+do { \
+ __current_thread_info->addr_limit = (_x); \
+} while(0)
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_SEGMENT_H */
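
These segment limits are normally consumed by the uaccess layer, which is not part of this header. The sketch below is a simplified, hypothetical range check built on the same mm_segment_t/addr_limit idea, only to show why set_fs(KERNEL_DS) widens the range of acceptable addresses; the helper name, the pretend USER_DS value and the exact comparison are assumptions for illustration.

/* Illustration only: a simplified, hypothetical range check built on the
 * mm_segment_t limits above. The real checks live in the uaccess headers
 * and are not reproduced by this file. */
#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define KERNEL_DS	MAKE_MM_SEG(0xdfffffffUL)	/* CONFIG_MMU value above */

static mm_segment_t current_limit;			/* stand-in for addr_limit */

static int addr_ok(unsigned long addr, unsigned long size)
{
	/* hypothetical check: the whole range must sit at or below the limit */
	return addr <= current_limit.seg && size <= current_limit.seg - addr;
}

int main(void)
{
	current_limit = MAKE_MM_SEG(0xbfffffffUL);	/* pretend USER_DS */
	printf("user limit allows kernel address: %d\n", addr_ok(0xc0000000UL, 4));

	current_limit = KERNEL_DS;			/* after set_fs(KERNEL_DS) */
	printf("kernel limit allows kernel address: %d\n", addr_ok(0xc0000000UL, 4));
	return 0;
}
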
diff --git a/arch/frv/include/asm/sembuf.h b/arch/frv/include/asm/sembuf.h
new file mode 100644
index 00000000000..164b12786d6
--- /dev/null
+++ b/arch/frv/include/asm/sembuf.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_SEMBUF_H
+#define _ASM_SEMBUF_H
+
+/*
+ * The semid64_ds structure for the FR-V architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_SEMBUF_H */
+
diff --git a/arch/frv/include/asm/serial-regs.h b/arch/frv/include/asm/serial-regs.h
new file mode 100644
index 00000000000..e1286bda00e
--- /dev/null
+++ b/arch/frv/include/asm/serial-regs.h
@@ -0,0 +1,44 @@
+/* serial-regs.h: serial port registers
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SERIAL_REGS_H
+#define _ASM_SERIAL_REGS_H
+
+#include <linux/serial_reg.h>
+#include <asm/irc-regs.h>
+
+#define SERIAL_ICLK 33333333 /* the target serial input clock */
+#define UART0_BASE 0xfeff9c00
+#define UART1_BASE 0xfeff9c40
+
+#define __get_UART0(R) ({ __reg(UART0_BASE + (R) * 8) >> 24; })
+#define __get_UART1(R) ({ __reg(UART1_BASE + (R) * 8) >> 24; })
+#define __set_UART0(R,V) do { __reg(UART0_BASE + (R) * 8) = (V) << 24; } while(0)
+#define __set_UART1(R,V) do { __reg(UART1_BASE + (R) * 8) = (V) << 24; } while(0)
+
+#define __get_UART0_LSR() ({ __get_UART0(UART_LSR); })
+#define __get_UART1_LSR() ({ __get_UART1(UART_LSR); })
+
+#define __set_UART0_IER(V) __set_UART0(UART_IER,(V))
+#define __set_UART1_IER(V) __set_UART1(UART_IER,(V))
+
+/* serial prescaler select register */
+#define __get_UCPSR() ({ *(volatile unsigned long *)(0xfeff9c90); })
+#define __set_UCPSR(V) do { *(volatile unsigned long *)(0xfeff9c90) = (V); } while(0)
+#define UCPSR_SELECT0 0x07000000
+#define UCPSR_SELECT1 0x38000000
+
+/* serial prescaler base value register */
+#define __get_UCPVR() ({ *(volatile unsigned long *)(0xfeff9c98); mb(); })
+#define __set_UCPVR(V) do { *(volatile unsigned long *)(0xfeff9c98) = (V) << 24; mb(); } while(0)
+
+
+#endif /* _ASM_SERIAL_REGS_H */
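
The accessors above treat each 8-bit UART register as the top byte of a 32-bit word, with consecutive registers spaced 8 bytes apart; that is why register numbers are multiplied by 8 and values are shifted by 24. The userspace sketch below replays the same pattern against a fake register block; the kernel's __reg() would instead dereference the real MMIO addresses at UART0_BASE/UART1_BASE.

/* Illustration only: the __get_UARTx()/__set_UARTx() access pattern from
 * above, applied to a fake in-memory register block rather than the MMIO
 * window at 0xfeff9c00. Each 8-bit register sits in the top byte of a
 * 32-bit word, and consecutive registers are 8 bytes apart. */
#include <stdio.h>

static volatile unsigned int fake_uart[16];	/* stand-in for the UART block */

#define __reg(off)	(fake_uart[(off) / 4])	/* fake: index by 32-bit word */
#define get_UART(R)	(__reg((R) * 8) >> 24)
#define set_UART(R, V)	do { __reg((R) * 8) = (unsigned int)(V) << 24; } while (0)

int main(void)
{
	set_UART(1, 0x0f);			/* e.g. an IER-style write */
	printf("register 1 reads back as 0x%02x\n", get_UART(1));
	return 0;
}
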
diff --git a/arch/frv/include/asm/serial.h b/arch/frv/include/asm/serial.h
new file mode 100644
index 00000000000..dbb82599868
--- /dev/null
+++ b/arch/frv/include/asm/serial.h
@@ -0,0 +1,18 @@
+/*
+ * serial.h
+ *
+ * Copyright (C) 2003 Develer S.r.l. (http://www.develer.com/)
+ * Author: Bernardo Innocenti <bernie@codewiz.org>
+ *
+ * Based on linux/include/asm-i386/serial.h
+ */
+#include <asm/serial-regs.h>
+
+/*
+ * the base baud is derived from the clock speed and so is variable
+ */
+#define BASE_BAUD 0
+
+#define STD_COM_FLAGS ASYNC_BOOT_AUTOCONF
+
+#define SERIAL_PORT_DFNS
diff --git a/arch/frv/include/asm/setup.h b/arch/frv/include/asm/setup.h
new file mode 100644
index 00000000000..afd787ceede
--- /dev/null
+++ b/arch/frv/include/asm/setup.h
@@ -0,0 +1,31 @@
+/* setup.h: setup stuff
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SETUP_H
+#define _ASM_SETUP_H
+
+#define COMMAND_LINE_SIZE 512
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
+extern unsigned long __initdata num_mappedpages;
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SETUP_H */
diff --git a/arch/frv/include/asm/shmbuf.h b/arch/frv/include/asm/shmbuf.h
new file mode 100644
index 00000000000..4c6e711a477
--- /dev/null
+++ b/arch/frv/include/asm/shmbuf.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_SHMBUF_H
+#define _ASM_SHMBUF_H
+
+/*
+ * The shmid64_ds structure for the FR-V architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _ASM_SHMBUF_H */
+
diff --git a/arch/frv/include/asm/shmparam.h b/arch/frv/include/asm/shmparam.h
new file mode 100644
index 00000000000..ab711009cfa
--- /dev/null
+++ b/arch/frv/include/asm/shmparam.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SHMPARAM_H
+#define _ASM_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_SHMPARAM_H */
+
diff --git a/arch/frv/include/asm/sigcontext.h b/arch/frv/include/asm/sigcontext.h
new file mode 100644
index 00000000000..3b263f3cc96
--- /dev/null
+++ b/arch/frv/include/asm/sigcontext.h
@@ -0,0 +1,26 @@
+/* sigcontext.h: FRV signal context
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_SIGCONTEXT_H
+#define _ASM_SIGCONTEXT_H
+
+#include <asm/registers.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ struct user_context sc_context;
+ unsigned long sc_oldmask; /* old sigmask */
+} __attribute__((aligned(8)));
+
+#endif
diff --git a/arch/frv/include/asm/siginfo.h b/arch/frv/include/asm/siginfo.h
new file mode 100644
index 00000000000..d3fd1ca4565
--- /dev/null
+++ b/arch/frv/include/asm/siginfo.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_SIGINFO_H
+#define _ASM_SIGINFO_H
+
+#include <linux/types.h>
+#include <asm-generic/siginfo.h>
+
+#define FPE_MDAOVF (__SI_FAULT|9) /* media overflow */
+#undef NSIGFPE
+#define NSIGFPE 9
+
+#endif
+
diff --git a/arch/frv/include/asm/signal.h b/arch/frv/include/asm/signal.h
new file mode 100644
index 00000000000..f071e813dcb
--- /dev/null
+++ b/arch/frv/include/asm/signal.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_SIGNAL_H
+#define _ASM_SIGNAL_H
+
+#include <linux/types.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* !__KERNEL__ */
+
+#define SA_RESTORER 0x04000000 /* to get struct sigaction correct */
+
+#include <asm-generic/signal.h>
+
+#ifdef __KERNEL__
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+};
+
+#else
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SIGNAL_H */
diff --git a/arch/frv/include/asm/smp.h b/arch/frv/include/asm/smp.h
new file mode 100644
index 00000000000..38349ec8b61
--- /dev/null
+++ b/arch/frv/include/asm/smp.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+
+#ifdef CONFIG_SMP
+#error SMP not supported
+#endif
+
+#endif
diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h
new file mode 100644
index 00000000000..ce80fdadcce
--- /dev/null
+++ b/arch/frv/include/asm/socket.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add: #define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+#define SO_MARK 36
+
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
+#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+
+#endif /* _ASM_SOCKET_H */
+
diff --git a/arch/frv/include/asm/sockios.h b/arch/frv/include/asm/sockios.h
new file mode 100644
index 00000000000..5dbdd13e6de
--- /dev/null
+++ b/arch/frv/include/asm/sockios.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_SOCKIOS__
+#define _ASM_SOCKIOS__
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif /* _ASM_SOCKIOS__ */
+
diff --git a/arch/frv/include/asm/spinlock.h b/arch/frv/include/asm/spinlock.h
new file mode 100644
index 00000000000..fe385f45d1f
--- /dev/null
+++ b/arch/frv/include/asm/spinlock.h
@@ -0,0 +1,17 @@
+/* spinlock.h: spinlocks for FR-V
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SPINLOCK_H
+#define _ASM_SPINLOCK_H
+
+#error no spinlocks for FR-V yet
+
+#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/frv/include/asm/spr-regs.h b/arch/frv/include/asm/spr-regs.h
new file mode 100644
index 00000000000..d3883021f23
--- /dev/null
+++ b/arch/frv/include/asm/spr-regs.h
@@ -0,0 +1,416 @@
+/* spr-regs.h: special-purpose registers on the FRV
+ *
+ * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SPR_REGS_H
+#define _ASM_SPR_REGS_H
+
+/*
+ * PSR - Processor Status Register
+ */
+#define PSR_ET 0x00000001 /* enable interrupts/exceptions flag */
+#define PSR_PS 0x00000002 /* previous supervisor mode flag */
+#define PSR_S 0x00000004 /* supervisor mode flag */
+#define PSR_PIL 0x00000078 /* processor external interrupt level */
+#define PSR_PIL_0 0x00000000 /* - no interrupt in progress */
+#define PSR_PIL_13 0x00000068 /* - debugging only */
+#define PSR_PIL_14 0x00000070 /* - debugging in progress */
+#define PSR_PIL_15 0x00000078 /* - NMI in progress */
+#define PSR_EM 0x00000080 /* enable media operation */
+#define PSR_EF 0x00000100 /* enable FPU operation */
+#define PSR_BE 0x00001000 /* endianness mode */
+#define PSR_BE_LE 0x00000000 /* - little endian mode */
+#define PSR_BE_BE 0x00001000 /* - big endian mode */
+#define PSR_CM 0x00002000 /* conditional mode */
+#define PSR_NEM 0x00004000 /* non-excepting mode */
+#define PSR_ICE 0x00010000 /* in-circuit emulation mode */
+#define PSR_VERSION_SHIFT 24 /* CPU silicon ID */
+#define PSR_IMPLE_SHIFT 28 /* CPU core ID */
+
+#define PSR_VERSION(psr) (((psr) >> PSR_VERSION_SHIFT) & 0xf)
+#define PSR_IMPLE(psr) (((psr) >> PSR_IMPLE_SHIFT) & 0xf)
+
+#define PSR_IMPLE_FR401 0x2
+#define PSR_VERSION_FR401_MB93401 0x0
+#define PSR_VERSION_FR401_MB93401A 0x1
+#define PSR_VERSION_FR401_MB93403 0x2
+
+#define PSR_IMPLE_FR405 0x4
+#define PSR_VERSION_FR405_MB93405 0x0
+
+#define PSR_IMPLE_FR451 0x5
+#define PSR_VERSION_FR451_MB93451 0x0
+
+#define PSR_IMPLE_FR501 0x1
+#define PSR_VERSION_FR501_MB93501 0x1
+#define PSR_VERSION_FR501_MB93501A 0x2
+
+#define PSR_IMPLE_FR551 0x3
+#define PSR_VERSION_FR551_MB93555 0x1
+
+#define __get_PSR() ({ unsigned long x; asm volatile("movsg psr,%0" : "=r"(x)); x; })
+#define __set_PSR(V) do { asm volatile("movgs %0,psr" : : "r"(V)); } while(0)
+
+/*
+ * TBR - Trap Base Register
+ */
+#define TBR_TT 0x00000ff0
+#define TBR_TT_INSTR_MMU_MISS (0x01 << 4)
+#define TBR_TT_INSTR_ACC_ERROR (0x02 << 4)
+#define TBR_TT_INSTR_ACC_EXCEP (0x03 << 4)
+#define TBR_TT_PRIV_INSTR (0x06 << 4)
+#define TBR_TT_ILLEGAL_INSTR (0x07 << 4)
+#define TBR_TT_FP_EXCEPTION (0x0d << 4)
+#define TBR_TT_MP_EXCEPTION (0x0e << 4)
+#define TBR_TT_DATA_ACC_ERROR (0x11 << 4)
+#define TBR_TT_DATA_MMU_MISS (0x12 << 4)
+#define TBR_TT_DATA_ACC_EXCEP (0x13 << 4)
+#define TBR_TT_DATA_STR_ERROR (0x14 << 4)
+#define TBR_TT_DIVISION_EXCEP (0x17 << 4)
+#define TBR_TT_COMMIT_EXCEP (0x19 << 4)
+#define TBR_TT_INSTR_TLB_MISS (0x1a << 4)
+#define TBR_TT_DATA_TLB_MISS (0x1b << 4)
+#define TBR_TT_DATA_DAT_EXCEP (0x1d << 4)
+#define TBR_TT_DECREMENT_TIMER (0x1f << 4)
+#define TBR_TT_COMPOUND_EXCEP (0x20 << 4)
+#define TBR_TT_INTERRUPT_1 (0x21 << 4)
+#define TBR_TT_INTERRUPT_2 (0x22 << 4)
+#define TBR_TT_INTERRUPT_3 (0x23 << 4)
+#define TBR_TT_INTERRUPT_4 (0x24 << 4)
+#define TBR_TT_INTERRUPT_5 (0x25 << 4)
+#define TBR_TT_INTERRUPT_6 (0x26 << 4)
+#define TBR_TT_INTERRUPT_7 (0x27 << 4)
+#define TBR_TT_INTERRUPT_8 (0x28 << 4)
+#define TBR_TT_INTERRUPT_9 (0x29 << 4)
+#define TBR_TT_INTERRUPT_10 (0x2a << 4)
+#define TBR_TT_INTERRUPT_11 (0x2b << 4)
+#define TBR_TT_INTERRUPT_12 (0x2c << 4)
+#define TBR_TT_INTERRUPT_13 (0x2d << 4)
+#define TBR_TT_INTERRUPT_14 (0x2e << 4)
+#define TBR_TT_INTERRUPT_15 (0x2f << 4)
+#define TBR_TT_TRAP0 (0x80 << 4)
+#define TBR_TT_TRAP1 (0x81 << 4)
+#define TBR_TT_TRAP2 (0x82 << 4)
+#define TBR_TT_TRAP3 (0x83 << 4)
+#define TBR_TT_TRAP120 (0xf8 << 4)
+#define TBR_TT_TRAP121 (0xf9 << 4)
+#define TBR_TT_TRAP122 (0xfa << 4)
+#define TBR_TT_TRAP123 (0xfb << 4)
+#define TBR_TT_TRAP124 (0xfc << 4)
+#define TBR_TT_TRAP125 (0xfd << 4)
+#define TBR_TT_TRAP126 (0xfe << 4)
+#define TBR_TT_BREAK (0xff << 4)
+
+#define TBR_TT_ATOMIC_CMPXCHG32 TBR_TT_TRAP120
+#define TBR_TT_ATOMIC_XCHG32 TBR_TT_TRAP121
+#define TBR_TT_ATOMIC_XOR TBR_TT_TRAP122
+#define TBR_TT_ATOMIC_OR TBR_TT_TRAP123
+#define TBR_TT_ATOMIC_AND TBR_TT_TRAP124
+#define TBR_TT_ATOMIC_SUB TBR_TT_TRAP125
+#define TBR_TT_ATOMIC_ADD TBR_TT_TRAP126
+
+#define __get_TBR() ({ unsigned long x; asm volatile("movsg tbr,%0" : "=r"(x)); x; })
+
+/*
+ * HSR0 - Hardware Status Register 0
+ */
+#define HSR0_PDM 0x00000007 /* power down mode */
+#define HSR0_PDM_NORMAL 0x00000000 /* - normal mode */
+#define HSR0_PDM_CORE_SLEEP 0x00000001 /* - CPU core sleep mode */
+#define HSR0_PDM_BUS_SLEEP 0x00000003 /* - bus sleep mode */
+#define HSR0_PDM_PLL_RUN 0x00000005 /* - PLL run */
+#define HSR0_PDM_PLL_STOP 0x00000007 /* - PLL stop */
+#define HSR0_GRLE 0x00000040 /* GR lower register set enable */
+#define HSR0_GRHE 0x00000080 /* GR higher register set enable */
+#define HSR0_FRLE 0x00000100 /* FR lower register set enable */
+#define HSR0_FRHE 0x00000200 /* FR higher register set enable */
+#define HSR0_GRN 0x00000400 /* GR quantity */
+#define HSR0_GRN_64 0x00000000 /* - 64 GR registers */
+#define HSR0_GRN_32 0x00000400 /* - 32 GR registers */
+#define HSR0_FRN 0x00000800 /* FR quantity */
+#define HSR0_FRN_64 0x00000000 /* - 64 FR registers */
+#define HSR0_FRN_32 0x00000800 /* - 32 FR registers */
+#define HSR0_SA 0x00001000 /* start address (RAMBOOT#) */
+#define HSR0_ETMI 0x00008000 /* enable TIMERI (64-bit up timer) */
+#define HSR0_ETMD 0x00004000 /* enable TIMERD (32-bit down timer) */
+#define HSR0_PEDAT 0x00010000 /* previous DAT mode */
+#define HSR0_XEDAT 0x00020000 /* exception DAT mode */
+#define HSR0_EDAT 0x00080000 /* enable DAT mode */
+#define HSR0_RME 0x00400000 /* enable RAM mode */
+#define HSR0_EMEM 0x00800000 /* enable MMU_Miss mask */
+#define HSR0_EXMMU 0x01000000 /* enable extended MMU mode */
+#define HSR0_EDMMU 0x02000000 /* enable data MMU */
+#define HSR0_EIMMU 0x04000000 /* enable instruction MMU */
+#define HSR0_CBM 0x08000000 /* copy back mode */
+#define HSR0_CBM_WRITE_THRU 0x00000000 /* - write through */
+#define HSR0_CBM_COPY_BACK 0x08000000 /* - copy back */
+#define HSR0_NWA 0x10000000 /* no write allocate */
+#define HSR0_DCE 0x40000000 /* data cache enable */
+#define HSR0_ICE 0x80000000 /* instruction cache enable */
+
+#define __get_HSR(R) ({ unsigned long x; asm volatile("movsg hsr"#R",%0" : "=r"(x)); x; })
+#define __set_HSR(R,V) do { asm volatile("movgs %0,hsr"#R : : "r"(V)); } while(0)
+
+/*
+ * CCR - Condition Codes Register
+ */
+#define CCR_FCC0 0x0000000f /* FP/Media condition 0 (fcc0 reg) */
+#define CCR_FCC1 0x000000f0 /* FP/Media condition 1 (fcc1 reg) */
+#define CCR_FCC2 0x00000f00 /* FP/Media condition 2 (fcc2 reg) */
+#define CCR_FCC3 0x0000f000 /* FP/Media condition 3 (fcc3 reg) */
+#define CCR_ICC0 0x000f0000 /* Integer condition 0 (icc0 reg) */
+#define CCR_ICC0_C 0x00010000 /* - Carry flag */
+#define CCR_ICC0_V 0x00020000 /* - Overflow flag */
+#define CCR_ICC0_Z 0x00040000 /* - Zero flag */
+#define CCR_ICC0_N 0x00080000 /* - Negative flag */
+#define CCR_ICC1 0x00f00000 /* Integer condition 1 (icc1 reg) */
+#define CCR_ICC2 0x0f000000 /* Integer condition 2 (icc2 reg) */
+#define CCR_ICC3 0xf0000000 /* Integer condition 3 (icc3 reg) */
+
+/*
+ * CCCR - Condition Codes for Conditional Instructions Register
+ */
+#define CCCR_CC0 0x00000003 /* condition 0 (cc0 reg) */
+#define CCCR_CC0_FALSE 0x00000002 /* - condition is false */
+#define CCCR_CC0_TRUE 0x00000003 /* - condition is true */
+#define CCCR_CC1 0x0000000c /* condition 1 (cc1 reg) */
+#define CCCR_CC2 0x00000030 /* condition 2 (cc2 reg) */
+#define CCCR_CC3 0x000000c0 /* condition 3 (cc3 reg) */
+#define CCCR_CC4 0x00000300 /* condition 4 (cc4 reg) */
+#define CCCR_CC5 0x00000c00 /* condition 5 (cc5 reg) */
+#define CCCR_CC6 0x00003000 /* condition 6 (cc6 reg) */
+#define CCCR_CC7 0x0000c000 /* condition 7 (cc7 reg) */
+
+/*
+ * ISR - Integer Status Register
+ */
+#define ISR_EMAM 0x00000001 /* memory misaligned access handling */
+#define ISR_EMAM_EXCEPTION 0x00000000 /* - generate exception */
+#define ISR_EMAM_FUDGE 0x00000001 /* - mask out invalid address bits */
+#define ISR_AEXC 0x00000004 /* accrued [overflow] exception */
+#define ISR_DTT 0x00000018 /* division type trap */
+#define ISR_DTT_IGNORE 0x00000000 /* - ignore division error */
+#define ISR_DTT_DIVBYZERO 0x00000008 /* - generate exception */
+#define ISR_DTT_OVERFLOW 0x00000010 /* - record overflow */
+#define ISR_EDE 0x00000020 /* enable division exception */
+#define ISR_PLI 0x20000000 /* pre-load instruction information */
+#define ISR_QI 0x80000000 /* quad data implementation information */
+
+/*
+ * EPCR0 - Exception PC Register
+ */
+#define EPCR0_V 0x00000001 /* register content validity indicator */
+#define EPCR0_PC 0xfffffffc /* faulting instruction address */
+
+/*
+ * ESR0/14/15 - Exception Status Register
+ */
+#define ESRx_VALID 0x00000001 /* register content validity indicator */
+#define ESRx_EC 0x0000003e /* exception type */
+#define ESRx_EC_DATA_STORE 0x00000000 /* - data_store_error */
+#define ESRx_EC_INSN_ACCESS 0x00000006 /* - instruction_access_error */
+#define ESRx_EC_PRIV_INSN 0x00000008 /* - privileged_instruction */
+#define ESRx_EC_ILL_INSN 0x0000000a /* - illegal_instruction */
+#define ESRx_EC_MP_EXCEP 0x0000001c /* - mp_exception */
+#define ESRx_EC_DATA_ACCESS 0x00000020 /* - data_access_error */
+#define ESRx_EC_DIVISION 0x00000026 /* - division_exception */
+#define ESRx_EC_ITLB_MISS 0x00000034 /* - instruction_access_TLB_miss */
+#define ESRx_EC_DTLB_MISS 0x00000036 /* - data_access_TLB_miss */
+#define ESRx_EC_DATA_ACCESS_DAT 0x0000003a /* - data_access_DAT_exception */
+
+#define ESR0_IAEC 0x00000100 /* info for instruction-access-exception */
+#define ESR0_IAEC_RESV 0x00000000 /* - reserved */
+#define ESR0_IAEC_PROT_VIOL 0x00000100 /* - protection violation */
+
+#define ESR0_ATXC 0x00f00000 /* address translation exception code */
+#define ESR0_ATXC_MMU_MISS 0x00000000 /* - MMU miss exception and more (?) */
+#define ESR0_ATXC_MULTI_DAT 0x00800000 /* - multiple DAT entry hit */
+#define ESR0_ATXC_MULTI_SAT 0x00900000 /* - multiple SAT entry hit */
+#define ESR0_ATXC_AMRTLB_MISS 0x00a00000 /* - MMU/TLB miss exception */
+#define ESR0_ATXC_PRIV_EXCEP 0x00c00000 /* - privilege protection fault */
+#define ESR0_ATXC_WP_EXCEP 0x00d00000 /* - write protection fault */
+
+#define ESR0_EAV 0x00000800 /* true if EAR0 register valid */
+#define ESR15_EAV 0x00000800 /* true if EAR15 register valid */
+
+/*
+ * ESFR1 - Exception Status Valid Flag Register
+ */
+#define ESFR1_ESR0 0x00000001 /* true if ESR0 is valid */
+#define ESFR1_ESR14 0x00004000 /* true if ESR14 is valid */
+#define ESFR1_ESR15 0x00008000 /* true if ESR15 is valid */
+
+/*
+ * MSR - Media Status Register
+ */
+#define MSR0_AOVF 0x00000001 /* overflow exception accrued */
+#define MSRx_OVF 0x00000002 /* overflow exception detected */
+#define MSRx_SIE 0x0000003c /* last SIMD instruction exception detected */
+#define MSRx_SIE_NONE 0x00000000 /* - none detected */
+#define MSRx_SIE_FRkHI_ACCk 0x00000020 /* - exception at FRkHI or ACCk */
+#define MSRx_SIE_FRkLO_ACCk1 0x00000010 /* - exception at FRkLO or ACCk+1 */
+#define MSRx_SIE_FRk1HI_ACCk2 0x00000008 /* - exception at FRk+1HI or ACCk+2 */
+#define MSRx_SIE_FRk1LO_ACCk3 0x00000004 /* - exception at FRk+1LO or ACCk+3 */
+#define MSR0_MTT 0x00007000 /* type of last media trap detected */
+#define MSR0_MTT_NONE 0x00000000 /* - none detected */
+#define MSR0_MTT_OVERFLOW 0x00001000 /* - overflow detected */
+#define MSR0_HI 0x00c00000 /* hardware implementation */
+#define MSR0_HI_ROUNDING 0x00000000 /* - rounding mode */
+#define MSR0_HI_NONROUNDING 0x00c00000 /* - non-rounding mode */
+#define MSR0_EMCI 0x01000000 /* enable media custom instructions */
+#define MSR0_SRDAV 0x10000000 /* select rounding mode of MAVEH */
+#define MSR0_SRDAV_RDAV 0x00000000 /* - controlled by MSR.RDAV */
+#define MSR0_SRDAV_RD 0x10000000 /* - controlled by MSR.RD */
+#define MSR0_RDAV 0x20000000 /* rounding mode of MAVEH */
+#define MSR0_RDAV_NEAREST_MI 0x00000000 /* - round to nearest minus */
+#define MSR0_RDAV_NEAREST_PL 0x20000000 /* - round to nearest plus */
+#define MSR0_RD 0xc0000000 /* rounding mode */
+#define MSR0_RD_NEAREST 0x00000000 /* - nearest */
+#define MSR0_RD_ZERO 0x40000000 /* - zero */
+#define MSR0_RD_POS_INF 0x80000000 /* - positive infinity */
+#define MSR0_RD_NEG_INF 0xc0000000 /* - negative infinity */
+
+/*
+ * IAMPR0-7 - Instruction Address Mapping Register
+ * DAMPR0-7 - Data Address Mapping Register
+ */
+#define xAMPRx_V 0x00000001 /* register content validity indicator */
+#define DAMPRx_WP 0x00000002 /* write protect */
+#define DAMPRx_WP_RW 0x00000000 /* - read/write */
+#define DAMPRx_WP_RO 0x00000002 /* - read-only */
+#define xAMPRx_C 0x00000004 /* cached/uncached */
+#define xAMPRx_C_CACHED 0x00000000 /* - cached */
+#define xAMPRx_C_UNCACHED 0x00000004 /* - uncached */
+#define xAMPRx_S 0x00000008 /* supervisor only */
+#define xAMPRx_S_USER 0x00000000 /* - userspace can access */
+#define xAMPRx_S_KERNEL 0x00000008 /* - kernel only */
+#define xAMPRx_SS 0x000000f0 /* segment size */
+#define xAMPRx_SS_16Kb 0x00000000 /* - 16 kilobytes */
+#define xAMPRx_SS_64Kb 0x00000010 /* - 64 kilobytes */
+#define xAMPRx_SS_256Kb 0x00000020 /* - 256 kilobytes */
+#define xAMPRx_SS_1Mb 0x00000030 /* - 1 megabyte */
+#define xAMPRx_SS_2Mb 0x00000040 /* - 2 megabytes */
+#define xAMPRx_SS_4Mb 0x00000050 /* - 4 megabytes */
+#define xAMPRx_SS_8Mb 0x00000060 /* - 8 megabytes */
+#define xAMPRx_SS_16Mb 0x00000070 /* - 16 megabytes */
+#define xAMPRx_SS_32Mb 0x00000080 /* - 32 megabytes */
+#define xAMPRx_SS_64Mb 0x00000090 /* - 64 megabytes */
+#define xAMPRx_SS_128Mb 0x000000a0 /* - 128 megabytes */
+#define xAMPRx_SS_256Mb 0x000000b0 /* - 256 megabytes */
+#define xAMPRx_SS_512Mb 0x000000c0 /* - 512 megabytes */
+#define xAMPRx_RESERVED8 0x00000100 /* reserved bit */
+#define xAMPRx_NG 0x00000200 /* non-global */
+#define xAMPRx_L 0x00000400 /* locked */
+#define xAMPRx_M 0x00000800 /* modified */
+#define xAMPRx_D 0x00001000 /* DAT entry */
+#define xAMPRx_RESERVED13 0x00002000 /* reserved bit */
+#define xAMPRx_PPFN 0xfff00000 /* physical page frame number */
+
+#define xAMPRx_V_BIT 0
+#define DAMPRx_WP_BIT 1
+#define xAMPRx_C_BIT 2
+#define xAMPRx_S_BIT 3
+#define xAMPRx_RESERVED8_BIT 8
+#define xAMPRx_NG_BIT 9
+#define xAMPRx_L_BIT 10
+#define xAMPRx_M_BIT 11
+#define xAMPRx_D_BIT 12
+#define xAMPRx_RESERVED13_BIT 13
+
+#define __get_IAMPR(R) ({ unsigned long x; asm volatile("movsg iampr"#R",%0" : "=r"(x)); x; })
+#define __get_DAMPR(R) ({ unsigned long x; asm volatile("movsg dampr"#R",%0" : "=r"(x)); x; })
+
+#define __get_IAMLR(R) ({ unsigned long x; asm volatile("movsg iamlr"#R",%0" : "=r"(x)); x; })
+#define __get_DAMLR(R) ({ unsigned long x; asm volatile("movsg damlr"#R",%0" : "=r"(x)); x; })
+
+#define __set_IAMPR(R,V) do { asm volatile("movgs %0,iampr"#R : : "r"(V)); } while(0)
+#define __set_DAMPR(R,V) do { asm volatile("movgs %0,dampr"#R : : "r"(V)); } while(0)
+
+#define __set_IAMLR(R,V) do { asm volatile("movgs %0,iamlr"#R : : "r"(V)); } while(0)
+#define __set_DAMLR(R,V) do { asm volatile("movgs %0,damlr"#R : : "r"(V)); } while(0)
+
+#define save_dampr(R, _dampr) \
+do { \
+ asm volatile("movsg dampr"R",%0" : "=r"(_dampr)); \
+} while(0)
+
+#define restore_dampr(R, _dampr) \
+do { \
+ asm volatile("movgs %0,dampr"R :: "r"(_dampr)); \
+} while(0)
+
+/*
+ * AMCR - Address Mapping Control Register
+ */
+#define AMCR_IAMRN 0x000000ff /* quantity of IAMPR registers */
+#define AMCR_DAMRN 0x0000ff00 /* quantity of DAMPR registers */
+
+/*
+ * TTBR - Address Translation Table Base Register
+ */
+#define __get_TTBR() ({ unsigned long x; asm volatile("movsg ttbr,%0" : "=r"(x)); x; })
+
+/*
+ * TPXR - TLB Probe Extend Register
+ */
+#define TPXR_E 0x00000001
+#define TPXR_LMAX_SHIFT 20
+#define TPXR_LMAX_SMASK 0xf
+#define TPXR_WMAX_SHIFT 24
+#define TPXR_WMAX_SMASK 0xf
+#define TPXR_WAY_SHIFT 28
+#define TPXR_WAY_SMASK 0xf
+
+/*
+ * DCR - Debug Control Register
+ */
+#define DCR_IBCE3 0x00000001 /* break on conditional insn pointed to by IBAR3 */
+#define DCR_IBE3 0x00000002 /* break on insn pointed to by IBAR3 */
+#define DCR_IBCE1 0x00000004 /* break on conditional insn pointed to by IBAR2 */
+#define DCR_IBE1 0x00000008 /* break on insn pointed to by IBAR2 */
+#define DCR_IBCE2 0x00000010 /* break on conditional insn pointed to by IBAR1 */
+#define DCR_IBE2 0x00000020 /* break on insn pointed to by IBAR1 */
+#define DCR_IBCE0 0x00000040 /* break on conditional insn pointed to by IBAR0 */
+#define DCR_IBE0 0x00000080 /* break on insn pointed to by IBAR0 */
+
+#define DCR_DDBE1 0x00004000 /* use DBDR1x when checking DBAR1 */
+#define DCR_DWBE1 0x00008000 /* break on store to address in DBAR1/DBMR1x */
+#define DCR_DRBE1 0x00010000 /* break on load from address in DBAR1/DBMR1x */
+#define DCR_DDBE0 0x00020000 /* use DBDR0x when checking DBAR0 */
+#define DCR_DWBE0 0x00040000 /* break on store to address in DBAR0/DBMR0x */
+#define DCR_DRBE0 0x00080000 /* break on load from address in DBAR0/DBMR0x */
+
+#define DCR_EIM 0x0c000000 /* external interrupt disable */
+#define DCR_IBM 0x10000000 /* instruction break disable */
+#define DCR_SE 0x20000000 /* single step enable */
+#define DCR_EBE 0x40000000 /* exception break enable */
+
+/*
+ * BRR - Break Interrupt Request Register
+ */
+#define BRR_ST 0x00000001 /* single-step detected */
+#define BRR_SB 0x00000002 /* break instruction detected */
+#define BRR_BB 0x00000004 /* branch with hint detected */
+#define BRR_CBB 0x00000008 /* branch to LR detected */
+#define BRR_IBx 0x000000f0 /* hardware breakpoint detected */
+#define BRR_DBx 0x00000f00 /* hardware watchpoint detected */
+#define BRR_DBNEx 0x0000f000 /* ? */
+#define BRR_EBTT 0x00ff0000 /* trap type of exception break */
+#define BRR_TB 0x10000000 /* external break request detected */
+#define BRR_CB 0x20000000 /* ICE break command detected */
+#define BRR_EB 0x40000000 /* exception break detected */
+
+/*
+ * BPSR - Break PSR Save Register
+ */
+#define BPSR_BET 0x00000001 /* former PSR.ET */
+#define BPSR_BS 0x00001000 /* former PSR.S */
+
+#endif /* _ASM_SPR_REGS_H */
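
Illustrative sketch (not part of the patch): the HSR0 and DAMPR/DAMLR accessors above are typically used from kernel code as shown below. The function name frv_dump_mapping_state is hypothetical, and <linux/kernel.h> is assumed to be included alongside this header.

/* Hypothetical sketch: query cache/MMU enables via HSR0 and report the first
 * data address-mapping register using the accessor macros defined above. */
static void frv_dump_mapping_state(void)
{
	unsigned long hsr0 = __get_HSR(0);

	if (hsr0 & HSR0_ICE)
		printk("instruction cache enabled\n");
	if (hsr0 & HSR0_DCE)
		printk("data cache enabled\n");
	if (hsr0 & (HSR0_EIMMU | HSR0_EDMMU))
		printk("MMU enabled\n");

	/* DAMPR0 describes a valid mapping only if its V bit is set */
	if (__get_DAMPR(0) & xAMPRx_V)
		printk("DAMPR0: ampr=%08lx amlr=%08lx\n",
		       __get_DAMPR(0), __get_DAMLR(0));
}
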
diff --git a/arch/frv/include/asm/stat.h b/arch/frv/include/asm/stat.h
new file mode 100644
index 00000000000..ce56de9b37b
--- /dev/null
+++ b/arch/frv/include/asm/stat.h
@@ -0,0 +1,100 @@
+#ifndef _ASM_STAT_H
+#define _ASM_STAT_H
+
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+/* This matches struct stat in uClibc/glibc. */
+struct stat {
+ unsigned char __pad1[6];
+ unsigned short st_dev;
+
+ unsigned long __pad2;
+ unsigned long st_ino;
+
+ unsigned short __pad3;
+ unsigned short st_mode;
+ unsigned short __pad4;
+ unsigned short st_nlink;
+
+ unsigned short __pad5;
+ unsigned short st_uid;
+ unsigned short __pad6;
+ unsigned short st_gid;
+
+ unsigned char __pad7[6];
+ unsigned short st_rdev;
+
+ unsigned long __pad8;
+ unsigned long st_size;
+
+ unsigned long __pad9; /* align 64-bit st_blocks to 2-word */
+ unsigned long st_blksize;
+
+ unsigned long __pad10; /* future possible st_blocks high bits */
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+
+ unsigned long __unused1;
+ unsigned long st_atime;
+
+ unsigned long __unused2;
+ unsigned long st_mtime;
+
+ unsigned long __unused3;
+ unsigned long st_ctime;
+
+ unsigned long long __unused4;
+};
+
+/* This matches struct stat64 in uClibc/glibc. The layout is exactly
+ the same as that of struct stat above, with 64-bit types taking up
+ space that was formerly used by padding. stat syscalls are still
+ different from stat64, though, in that the former tests for
+ overflow. */
+struct stat64 {
+ unsigned char __pad1[6];
+ unsigned short st_dev;
+
+ unsigned long long st_ino;
+
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned char __pad2[6];
+ unsigned short st_rdev;
+
+ long long st_size;
+
+ unsigned long __pad3; /* align 64-bit st_blocks to 2-word */
+ unsigned long st_blksize;
+
+ unsigned long __pad4; /* future possible st_blocks high bits */
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+
+ unsigned long st_atime_nsec;
+ unsigned long st_atime;
+
+ unsigned int st_mtime_nsec;
+ unsigned long st_mtime;
+
+ unsigned long st_ctime_nsec;
+ unsigned long st_ctime;
+
+ unsigned long long __unused4;
+};
+
+#endif /* _ASM_STAT_H */
diff --git a/arch/frv/include/asm/statfs.h b/arch/frv/include/asm/statfs.h
new file mode 100644
index 00000000000..741f586045b
--- /dev/null
+++ b/arch/frv/include/asm/statfs.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_STATFS_H
+#define _ASM_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif /* _ASM_STATFS_H */
+
diff --git a/arch/frv/include/asm/string.h b/arch/frv/include/asm/string.h
new file mode 100644
index 00000000000..5ed310f64b7
--- /dev/null
+++ b/arch/frv/include/asm/string.h
@@ -0,0 +1,51 @@
+/* string.h: FRV string handling
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_STRING_H_
+#define _ASM_STRING_H_
+
+#ifdef __KERNEL__ /* only set these up for kernel code */
+
+#define __HAVE_ARCH_MEMSET 1
+#define __HAVE_ARCH_MEMCPY 1
+
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#else /* KERNEL */
+
+/*
+ * let user libraries deal with these,
+ * IMHO the kernel has no place defining these functions for user apps
+ */
+
+#define __HAVE_ARCH_STRCPY 1
+#define __HAVE_ARCH_STRNCPY 1
+#define __HAVE_ARCH_STRCAT 1
+#define __HAVE_ARCH_STRNCAT 1
+#define __HAVE_ARCH_STRCMP 1
+#define __HAVE_ARCH_STRNCMP 1
+#define __HAVE_ARCH_STRNICMP 1
+#define __HAVE_ARCH_STRCHR 1
+#define __HAVE_ARCH_STRRCHR 1
+#define __HAVE_ARCH_STRSTR 1
+#define __HAVE_ARCH_STRLEN 1
+#define __HAVE_ARCH_STRNLEN 1
+#define __HAVE_ARCH_MEMSET 1
+#define __HAVE_ARCH_MEMCPY 1
+#define __HAVE_ARCH_MEMMOVE 1
+#define __HAVE_ARCH_MEMSCAN 1
+#define __HAVE_ARCH_MEMCMP 1
+#define __HAVE_ARCH_MEMCHR 1
+#define __HAVE_ARCH_STRTOK 1
+
+#endif /* KERNEL */
+#endif /* _ASM_STRING_H_ */
diff --git a/arch/frv/include/asm/swab.h b/arch/frv/include/asm/swab.h
new file mode 100644
index 00000000000..f305834b479
--- /dev/null
+++ b/arch/frv/include/asm/swab.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_SWAB_H
+#define _ASM_SWAB_H
+
+#include <linux/types.h>
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __SWAB_64_THRU_32__
+#endif
+
+#endif /* _ASM_SWAB_H */
diff --git a/arch/frv/include/asm/syscall.h b/arch/frv/include/asm/syscall.h
new file mode 100644
index 00000000000..70689eb29b9
--- /dev/null
+++ b/arch/frv/include/asm/syscall.h
@@ -0,0 +1,123 @@
+/* syscall parameter access functions
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+/*
+ * Get the system call number or -1
+ */
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->syscallno;
+}
+
+/*
+ * Restore the clobbered GR8 register
+ * (1st syscall arg was overwritten with syscall return or error)
+ */
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->gr8 = regs->orig_gr8;
+}
+
+/*
+ * See if the syscall return value is an error, returning it if it is and 0 if
+ * not
+ */
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->gr8) ? regs->gr8 : 0;
+}
+
+/*
+ * Get the syscall return value
+ */
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->gr8;
+}
+
+/*
+ * Set the syscall return value
+ */
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ if (error)
+ regs->gr8 = -error;
+ else
+ regs->gr8 = val;
+}
+
+/*
+ * Retrieve the system call arguments
+ */
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ /*
+ * Do this simply for now. If we need to start supporting
+ * fetching arguments from arbitrary indices, this will need some
+ * extra logic. Presently there are no in-tree users that depend
+ * on this behaviour.
+ */
+ BUG_ON(i);
+
+ /* Argument pattern is: GR8, GR9, GR10, GR11, GR12, GR13 */
+ switch (n) {
+ case 6: args[5] = regs->gr13;
+ case 5: args[4] = regs->gr12;
+ case 4: args[3] = regs->gr11;
+ case 3: args[2] = regs->gr10;
+ case 2: args[1] = regs->gr9;
+ case 1: args[0] = regs->gr8;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Alter the system call arguments
+ */
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ /* Same note as above applies */
+ BUG_ON(i);
+
+ switch (n) {
+ case 6: regs->gr13 = args[5];
+ case 5: regs->gr12 = args[4];
+ case 4: regs->gr11 = args[3];
+ case 3: regs->gr10 = args[2];
+ case 2: regs->gr9 = args[1];
+ case 1: regs->gr8 = args[0];
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /* _ASM_SYSCALL_H */
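
Illustrative sketch (not part of the patch): a tracing hook might read the syscall number and arguments with the accessors above as shown below. The function name frv_trace_syscall_entry is hypothetical; current and printk() are assumed to come from the usual <linux/sched.h> and <linux/kernel.h> includes.

/* Hypothetical sketch: dump the syscall number and all six arguments of the
 * current task on syscall entry. */
static void frv_trace_syscall_entry(struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(current, regs);

	/* i must be 0: the FRV implementation BUG()s on any other index */
	syscall_get_arguments(current, regs, 0, 6, args);

	printk("syscall %ld (%lx, %lx, %lx, %lx, %lx, %lx)\n",
	       nr, args[0], args[1], args[2], args[3], args[4], args[5]);
}
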
diff --git a/arch/frv/include/asm/system.h b/arch/frv/include/asm/system.h
new file mode 100644
index 00000000000..6c10fd2c626
--- /dev/null
+++ b/arch/frv/include/asm/system.h
@@ -0,0 +1,158 @@
+/* system.h: FR-V CPU control definitions
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_SYSTEM_H
+#define _ASM_SYSTEM_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+
+struct thread_struct;
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'.
+ * The `mb' is to tell GCC not to cache `current' across this call.
+ */
+extern asmlinkage
+struct task_struct *__switch_to(struct thread_struct *prev_thread,
+ struct thread_struct *next_thread,
+ struct task_struct *prev);
+
+#define switch_to(prev, next, last) \
+do { \
+ (prev)->thread.sched_lr = \
+ (unsigned long) __builtin_return_address(0); \
+ (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
+ mb(); \
+} while(0)
+
+/*
+ * Force strict CPU ordering.
+ */
+#define nop() asm volatile ("nop"::)
+#define mb() asm volatile ("membar" : : :"memory")
+#define rmb() asm volatile ("membar" : : :"memory")
+#define wmb() asm volatile ("membar" : : :"memory")
+#define read_barrier_depends() do { } while (0)
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do {} while(0)
+#define set_mb(var, value) \
+ do { var = (value); barrier(); } while (0)
+
+extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
+extern void free_initmem(void);
+
+#define arch_align_stack(x) (x)
+
+/*****************************************************************************/
+/*
+ * compare and conditionally exchange value with memory
+ * - if (*ptr == test) then orig = *ptr; *ptr = new;
+ * - if (*ptr != test) then orig = *ptr;
+ */
+extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
+
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define cmpxchg(ptr, test, new) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
+ __typeof__(*(ptr)) __xg_test = (test); \
+ __typeof__(*(ptr)) __xg_new = (new); \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ld.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " sub%I4cc %1,%4,%2,icc0 \n" \
+ " bne icc0,#0,1f \n" \
+ " cst.p %3,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ "1: \n" \
+ : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
+ : "r"(__xg_new), "NPr"(__xg_test) \
+ : "memory", "cc7", "cc3", "icc3", "icc0" \
+ ); \
+ break; \
+ \
+ default: \
+ __xg_orig = (__typeof__(__xg_orig))0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ \
+ __xg_orig; \
+})
+
+#else
+
+extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
+
+#define cmpxchg(ptr, test, new) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig; \
+ __typeof__(*(ptr)) __xg_test = (test); \
+ __typeof__(*(ptr)) __xg_new = (new); \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: __xg_orig = (__force __typeof__(*ptr)) \
+ __cmpxchg_32((__force uint32_t *)__xg_ptr, \
+ (__force uint32_t)__xg_test, \
+ (__force uint32_t)__xg_new); break; \
+ default: \
+ __xg_orig = (__typeof__(__xg_orig))0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ \
+ __xg_orig; \
+})
+
+#endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return cmpxchg((unsigned long *)ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* _ASM_SYSTEM_H */
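
Illustrative sketch (not part of the patch): cmpxchg() returns the value that was in memory before the operation, so the exchange succeeded only when that value equals the expected one. The helper name frv_claim_once is hypothetical.

/* Hypothetical sketch: claim a one-shot flag atomically with cmpxchg(). */
static int frv_claim_once(unsigned long *flag)
{
	/* swap 0 -> 1; returns non-zero only for the caller that won the race */
	return cmpxchg(flag, 0UL, 1UL) == 0UL;
}
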
diff --git a/arch/frv/include/asm/termbits.h b/arch/frv/include/asm/termbits.h
new file mode 100644
index 00000000000..7722e19cc34
--- /dev/null
+++ b/arch/frv/include/asm/termbits.h
@@ -0,0 +1,203 @@
+#ifndef _ASM_TERMBITS_H__
+#define _ASM_TERMBITS_H__
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* Input baud rate */
+#define CTVB 004000000000 /* VisioBraille Terminal flow control */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+#define EXTPROC 0200000
+
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _ASM_TERMBITS_H__ */
+
diff --git a/arch/frv/include/asm/termios.h b/arch/frv/include/asm/termios.h
new file mode 100644
index 00000000000..b4868aafe79
--- /dev/null
+++ b/arch/frv/include/asm/termios.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_TERMIOS_H
+#define _ASM_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+#ifdef __KERNEL__
+/* intr=^C quit=^| erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+#endif
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+#define TIOCM_MODEM_BITS TIOCM_OUT2 /* IRDA support */
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+#ifdef __KERNEL__
+#include <asm-generic/termios-base.h>
+#endif
+
+#endif /* _ASM_TERMIOS_H */
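
Illustrative sketch (not part of the patch): from user space the TIOCM_* modem-line bits are read back with the TIOCMGET ioctl. The device path /dev/ttyS0 is only an example.

/* Hypothetical user-space sketch: read and test the modem-status lines. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <termios.h>

int main(void)
{
	int bits, fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK);

	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0)
		return 1;

	printf("CTS=%d DSR=%d CD=%d\n",
	       !!(bits & TIOCM_CTS), !!(bits & TIOCM_DSR), !!(bits & TIOCM_CD));
	return 0;
}
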
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
new file mode 100644
index 00000000000..92d83ea99ae
--- /dev/null
+++ b/arch/frv/include/asm/thread_info.h
@@ -0,0 +1,137 @@
+/* thread_info.h: low-level thread information
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * Derived from include/asm-i386/thread_info.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#endif
+
+#define THREAD_SIZE 8192
+
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space:
+ * 0-0xBFFFFFFF for user-thread
+ * 0-0xFFFFFFFF for kernel-thread
+ */
+ struct restart_block restart_block;
+
+ __u8 supervisor_stack[0];
+};
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+
+#endif
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ */
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
+register struct thread_info *__current_thread_info asm("gr15");
+
+#define current_thread_info() ({ __current_thread_info; })
+
+#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
+
+/* thread information allocation */
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define alloc_thread_info_node(tsk, node) \
+ kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
+#else
+#define alloc_thread_info_node(tsk, node) \
+ kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
+#endif
+
+#define free_thread_info(info) kfree(info)
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
+#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+
+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPM 0x0001 /* FPU/Media was used by this task this quantum (SMP) */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
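
Illustrative sketch (not part of the patch): current_thread_info() resolves to the GR15-based accessor above, and the TIF_* numbers index into the flags word. The helper name is hypothetical and test_bit() is assumed to come from <linux/bitops.h>.

/* Hypothetical sketch: check whether the current thread has been asked to
 * reschedule by testing its TIF_NEED_RESCHED flag. */
static int frv_current_needs_resched(void)
{
	return test_bit(TIF_NEED_RESCHED, &current_thread_info()->flags);
}
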
diff --git a/arch/frv/include/asm/timer-regs.h b/arch/frv/include/asm/timer-regs.h
new file mode 100644
index 00000000000..6c5a871ce5e
--- /dev/null
+++ b/arch/frv/include/asm/timer-regs.h
@@ -0,0 +1,106 @@
+/* timer-regs.h: hardware timer register definitions
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_TIMER_REGS_H
+#define _ASM_TIMER_REGS_H
+
+#include <asm/sections.h>
+
+extern unsigned long __nongprelbss __clkin_clock_speed_HZ;
+extern unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
+extern unsigned long __nongprelbss __res_bus_clock_speed_HZ;
+extern unsigned long __nongprelbss __sdram_clock_speed_HZ;
+extern unsigned long __nongprelbss __core_bus_clock_speed_HZ;
+extern unsigned long __nongprelbss __core_clock_speed_HZ;
+extern unsigned long __nongprelbss __dsu_clock_speed_HZ;
+extern unsigned long __nongprelbss __serial_clock_speed_HZ;
+
+#define __get_CLKC() ({ *(volatile unsigned long *)(0xfeff9a00); })
+
+static inline void __set_CLKC(unsigned long v)
+{
+ int tmp;
+
+ asm volatile(" st%I0.p %2,%M0 \n"
+ " setlos %3,%1 \n"
+ " membar \n"
+ "0: \n"
+ " subicc %1,#1,%1,icc0 \n"
+ " bnc icc0,#1,0b \n"
+ : "=m"(*(volatile unsigned long *) 0xfeff9a00), "=r"(tmp)
+ : "r"(v), "i"(256)
+ : "icc0");
+}
+
+#define __get_TCTR() ({ *(volatile unsigned long *)(0xfeff9418); })
+#define __get_TPRV() ({ *(volatile unsigned long *)(0xfeff9420); })
+#define __get_TPRCKSL() ({ *(volatile unsigned long *)(0xfeff9428); })
+#define __get_TCSR(T) ({ *(volatile unsigned long *)(0xfeff9400 + 8 * (T)); })
+#define __get_TxCKSL(T) ({ *(volatile unsigned long *)(0xfeff9430 + 8 * (T)); })
+
+#define __get_TCSR_DATA(T) ({ __get_TCSR(T) >> 24; })
+
+#define __set_TCTR(V) do { *(volatile unsigned long *)(0xfeff9418) = (V); mb(); } while(0)
+#define __set_TPRV(V) do { *(volatile unsigned long *)(0xfeff9420) = (V) << 24; mb(); } while(0)
+#define __set_TPRCKSL(V) do { *(volatile unsigned long *)(0xfeff9428) = (V); mb(); } while(0)
+#define __set_TCSR(T,V) \
+do { *(volatile unsigned long *)(0xfeff9400 + 8 * (T)) = (V); mb(); } while(0)
+
+#define __set_TxCKSL(T,V) \
+do { *(volatile unsigned long *)(0xfeff9430 + 8 * (T)) = (V); mb(); } while(0)
+
+#define __set_TCSR_DATA(T,V) __set_TCSR(T, (V) << 24)
+#define __set_TxCKSL_DATA(T,V) __set_TxCKSL(T, TxCKSL_EIGHT | __TxCKSL_SELECT((V)))
+
+/* clock control register */
+#define CLKC_CMODE 0x0f000000
+#define CLKC_SLPL 0x000f0000
+#define CLKC_P0 0x00000100
+#define CLKC_CM 0x00000003
+
+#define CLKC_CMODE_s 24
+
+/* timer control register - non-readback mode */
+#define TCTR_MODE_0 0x00000000
+#define TCTR_MODE_2 0x04000000
+#define TCTR_MODE_4 0x08000000
+#define TCTR_MODE_5 0x0a000000
+#define TCTR_RL_LATCH 0x00000000
+#define TCTR_RL_RW_LOW8 0x10000000
+#define TCTR_RL_RW_HIGH8 0x20000000
+#define TCTR_RL_RW_LH8 0x30000000
+#define TCTR_SC_CTR0 0x00000000
+#define TCTR_SC_CTR1 0x40000000
+#define TCTR_SC_CTR2 0x80000000
+
+/* timer control register - readback mode */
+#define TCTR_CNT0 0x02000000
+#define TCTR_CNT1 0x04000000
+#define TCTR_CNT2 0x08000000
+#define TCTR_NSTATUS 0x10000000
+#define TCTR_NCOUNT 0x20000000
+#define TCTR_SC_READBACK 0xc0000000
+
+/* timer control status registers - non-readback mode */
+#define TCSRx_DATA 0xff000000
+
+/* timer control status registers - readback mode */
+#define TCSRx_OUTPUT 0x80000000
+#define TCSRx_NULLCOUNT 0x40000000
+#define TCSRx_RL 0x30000000
+#define TCSRx_MODE 0x07000000
+
+/* timer clock select registers */
+#define TxCKSL_SELECT 0x0f000000
+#define __TxCKSL_SELECT(X) ((X) << 24)
+#define TxCKSL_EIGHT 0xf0000000
+
+#endif /* _ASM_TIMER_REGS_H */
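
Illustrative sketch (not part of the patch), assuming the 8254-style programming model implied by the mode and read/load bits above: counter 0 is configured as a rate generator and loaded low byte then high byte. The function name and the exact sequence are assumptions for illustration only.

/* Hypothetical sketch: program hardware counter 0 in mode 2 with a 16-bit
 * divisor using the register accessors defined above. */
static void frv_timer0_set_divisor(unsigned int divisor)
{
	__set_TCTR(TCTR_SC_CTR0 | TCTR_RL_RW_LH8 | TCTR_MODE_2);
	__set_TCSR_DATA(0, divisor & 0xff);
	__set_TCSR_DATA(0, (divisor >> 8) & 0xff);
}
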
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
new file mode 100644
index 00000000000..a89bddefdac
--- /dev/null
+++ b/arch/frv/include/asm/timex.h
@@ -0,0 +1,20 @@
+/* timex.h: FR-V architecture timex specifications
+ */
+#ifndef _ASM_TIMEX_H
+#define _ASM_TIMEX_H
+
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+
+#define vxtime_lock() do {} while (0)
+#define vxtime_unlock() do {} while (0)
+
+#endif
+
diff --git a/arch/frv/include/asm/tlb.h b/arch/frv/include/asm/tlb.h
new file mode 100644
index 00000000000..cd458eb6d75
--- /dev/null
+++ b/arch/frv/include/asm/tlb.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_TLB_H
+#define _ASM_TLB_H
+
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_MMU
+extern void check_pgt_cache(void);
+#else
+#define check_pgt_cache() do {} while(0)
+#endif
+
+/*
+ * we don't need any special per-pte or per-vma handling...
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+/*
+ * .. because we flush the whole mm when it fills up
+ */
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_TLB_H */
+
diff --git a/arch/frv/include/asm/tlbflush.h b/arch/frv/include/asm/tlbflush.h
new file mode 100644
index 00000000000..7ac5eafc5d9
--- /dev/null
+++ b/arch/frv/include/asm/tlbflush.h
@@ -0,0 +1,73 @@
+/* tlbflush.h: TLB flushing functions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_TLBFLUSH_H
+#define _ASM_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+
+#ifdef CONFIG_MMU
+
+#ifndef __ASSEMBLY__
+extern void asmlinkage __flush_tlb_all(void);
+extern void asmlinkage __flush_tlb_mm(unsigned long contextid);
+extern void asmlinkage __flush_tlb_page(unsigned long contextid, unsigned long start);
+extern void asmlinkage __flush_tlb_range(unsigned long contextid,
+ unsigned long start, unsigned long end);
+#endif /* !__ASSEMBLY__ */
+
+#define flush_tlb_all() \
+do { \
+ preempt_disable(); \
+ __flush_tlb_all(); \
+ preempt_enable(); \
+} while(0)
+
+#define flush_tlb_mm(mm) \
+do { \
+ preempt_disable(); \
+ __flush_tlb_mm((mm)->context.id); \
+ preempt_enable(); \
+} while(0)
+
+#define flush_tlb_range(vma,start,end) \
+do { \
+ preempt_disable(); \
+ __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
+ preempt_enable(); \
+} while(0)
+
+#define flush_tlb_page(vma,addr) \
+do { \
+ preempt_disable(); \
+ __flush_tlb_page((vma)->vm_mm->context.id, addr); \
+ preempt_enable(); \
+} while(0)
+
+
+#define __flush_tlb_global() flush_tlb_all()
+#define flush_tlb() flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+
+#else
+
+#define flush_tlb() BUG()
+#define flush_tlb_all() BUG()
+#define flush_tlb_mm(mm) BUG()
+#define flush_tlb_page(vma,addr) BUG()
+#define flush_tlb_range(mm,start,end) BUG()
+#define flush_tlb_kernel_range(start, end) BUG()
+
+#endif
+
+
+#endif /* _ASM_TLBFLUSH_H */
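
Illustrative sketch (not part of the patch): after modifying a single page-table entry, the stale translation for that address is dropped with flush_tlb_page(). The helper name is hypothetical; set_pte() and pte_t are assumed to come from <asm/pgtable.h>, and CONFIG_MMU is assumed to be enabled.

/* Hypothetical sketch: install a new PTE and invalidate the old translation. */
static void frv_update_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);		/* install the new entry */
	flush_tlb_page(vma, addr);	/* flush the stale TLB entry */
}
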
diff --git a/arch/frv/include/asm/topology.h b/arch/frv/include/asm/topology.h
new file mode 100644
index 00000000000..94272435270
--- /dev/null
+++ b/arch/frv/include/asm/topology.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_TOPOLOGY_H
+#define _ASM_TOPOLOGY_H
+
+#ifdef CONFIG_NUMA
+
+#error NUMA not supported yet
+
+#endif /* CONFIG_NUMA */
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_TOPOLOGY_H */
diff --git a/arch/frv/include/asm/types.h b/arch/frv/include/asm/types.h
new file mode 100644
index 00000000000..390a612f3a5
--- /dev/null
+++ b/arch/frv/include/asm/types.h
@@ -0,0 +1,26 @@
+/* types.h: FRV types
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+#define BITS_PER_LONG 32
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_TYPES_H */
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
new file mode 100644
index 00000000000..0b67ec5b441
--- /dev/null
+++ b/arch/frv/include/asm/uaccess.h
@@ -0,0 +1,319 @@
+/* uaccess.h: userspace accessor functions
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UACCESS_H
+#define _ASM_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/segment.h>
+#include <asm/sections.h>
+
+#define HAVE_ARCH_UNMAPPED_AREA /* we decide where to put mmaps */
+
+#define __ptr(x) ((unsigned long __force *)(x))
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * check that a range of addresses falls within the current address limit
+ */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
+{
+#ifdef CONFIG_MMU
+ int flag = -EFAULT, tmp;
+
+ asm volatile (
+ " addcc %3,%2,%1,icc0 \n" /* set C-flag if addr+size>4GB */
+ " subcc.p %1,%4,gr0,icc1 \n" /* jump if addr+size>limit */
+ " bc icc0,#0,0f \n"
+ " bhi icc1,#0,0f \n"
+ " setlos #0,%0 \n" /* mark okay */
+ "0: \n"
+ : "=r"(flag), "=&r"(tmp)
+ : "r"(addr), "r"(size), "r"(get_addr_limit()), "0"(flag)
+ );
+
+ return flag;
+
+#else
+
+ if (addr < memory_start ||
+ addr > memory_end ||
+ size > memory_end - memory_start ||
+ addr + size > memory_end)
+ return -EFAULT;
+
+ return 0;
+#endif
+}
+
+#define __range_ok(addr,size) ___range_ok((unsigned long) (addr), (unsigned long) (size))
+
+#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
+#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ */
+#define __put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ \
+ typeof(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
+ \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ __put_user_asm(__pu_err, __pu_val, ptr, "b", "r"); \
+ break; \
+ case 2: \
+ __put_user_asm(__pu_err, __pu_val, ptr, "h", "r"); \
+ break; \
+ case 4: \
+ __put_user_asm(__pu_err, __pu_val, ptr, "", "r"); \
+ break; \
+ case 8: \
+ __put_user_asm(__pu_err, __pu_val, ptr, "d", "e"); \
+ break; \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ typeof(*(ptr)) __user *_p = (ptr); \
+ int _e; \
+ \
+ _e = __range_ok(_p, sizeof(*_p)); \
+ if (_e == 0) \
+ _e = __put_user((x), _p); \
+ _e; \
+})
+
+extern int __put_user_bad(void);
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+
+#ifdef CONFIG_MMU
+
+#define __put_user_asm(err,x,ptr,dsize,constraint) \
+do { \
+ asm volatile("1: st"dsize"%I1 %2,%M1 \n" \
+ "2: \n" \
+ ".subsection 2 \n" \
+ "3: setlos %3,%0 \n" \
+ " bra 2b \n" \
+ ".previous \n" \
+ ".section __ex_table,\"a\" \n" \
+ " .balign 8 \n" \
+ " .long 1b,3b \n" \
+ ".previous" \
+ : "=r" (err) \
+ : "m" (*__ptr(ptr)), constraint (x), "i"(-EFAULT), "0"(err) \
+ : "memory"); \
+} while (0)
+
+#else
+
+#define __put_user_asm(err,x,ptr,bwl,con) \
+do { \
+ asm(" st"bwl"%I0 %1,%M0 \n" \
+ " membar \n" \
+ : \
+ : "m" (*__ptr(ptr)), con (x) \
+ : "memory"); \
+} while (0)
+
+#endif
+
+/*****************************************************************************/
+/*
+ * single-value transfer from userspace; mirrors __put_user()/put_user() above
+ */
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
+ \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
+ break; \
+ } \
+ case 2: { \
+ unsigned short __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
+ break; \
+ } \
+ case 4: { \
+ unsigned int __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
+ break; \
+ } \
+ case 8: { \
+ unsigned long long __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "d", "=e"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
+ break; \
+ } \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ const typeof(*(ptr)) __user *_p = (ptr);\
+ int _e; \
+ \
+ _e = __range_ok(_p, sizeof(*_p)); \
+ if (likely(_e == 0)) \
+ _e = __get_user((x), _p); \
+ else \
+ (x) = (typeof(x)) 0; \
+ _e; \
+})
+
+extern int __get_user_bad(void);
+
+#ifdef CONFIG_MMU
+
+#define __get_user_asm(err,x,ptr,dtype,constraint) \
+do { \
+ asm("1: ld"dtype"%I2 %M2,%1 \n" \
+ "2: \n" \
+ ".subsection 2 \n" \
+ "3: setlos %3,%0 \n" \
+ " setlos #0,%1 \n" \
+ " bra 2b \n" \
+ ".previous \n" \
+ ".section __ex_table,\"a\" \n" \
+ " .balign 8 \n" \
+ " .long 1b,3b \n" \
+ ".previous" \
+ : "=r" (err), constraint (x) \
+ : "m" (*__ptr(ptr)), "i"(-EFAULT), "0"(err) \
+ ); \
+} while(0)
+
+#else
+
+#define __get_user_asm(err,x,ptr,bwl,con) \
+ asm(" ld"bwl"%I1 %M1,%0 \n" \
+ " membar \n" \
+ : con(x) \
+ : "m" (*__ptr(ptr)))
+
+#endif
+
+/*****************************************************************************/
+/*
+ * bulk clear/copy helpers between kernel space and userspace
+ */
+#define ____force(x) (__force void *)(void __user *)(x)
+#ifdef CONFIG_MMU
+extern long __memset_user(void *dst, unsigned long count);
+extern long __memcpy_user(void *dst, const void *src, unsigned long count);
+
+#define clear_user(dst,count) __memset_user(____force(dst), (count))
+#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
+#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
+
+#else
+
+#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
+#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
+#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
+
+#endif
+
+#define __clear_user clear_user
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_sleep();
+ return __copy_to_user_inatomic(to, from, n);
+}
+
+static inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_sleep();
+ return __copy_from_user_inatomic(to, from, n);
+}
+
+static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long ret = n;
+
+ if (likely(__access_ok(from, n)))
+ ret = __copy_from_user(to, from, n);
+
+ if (unlikely(ret != 0))
+ memset(to + (n - ret), 0, ret);
+
+ return ret;
+}
+
+static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
+}
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern long strnlen_user(const char __user *src, long count);
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+extern unsigned long search_exception_table(unsigned long addr);
+
+#endif /* _ASM_UACCESS_H */
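
Illustrative sketch (not part of the patch): a typical consumer pulls a structure in with copy_from_user() and writes a result back with put_user(). The structure and function names are hypothetical, and -EFAULT is assumed to come from <linux/errno.h>.

/* Hypothetical sketch: copy a request in from user space, then return one
 * status word to the same user pointer. */
struct frv_demo_req {
	unsigned long addr;
	unsigned long len;
};

static long frv_demo_ioctl(void __user *uarg)
{
	struct frv_demo_req req;

	if (copy_from_user(&req, uarg, sizeof(req)) != 0)
		return -EFAULT;		/* some bytes were not copyable */

	/* ... act on req.addr / req.len ... */

	return put_user(req.len, (unsigned long __user *)uarg);
}
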
diff --git a/arch/frv/include/asm/ucontext.h b/arch/frv/include/asm/ucontext.h
new file mode 100644
index 00000000000..8d8c0c94800
--- /dev/null
+++ b/arch/frv/include/asm/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_UCONTEXT_H
+#define _ASM_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif
diff --git a/arch/frv/include/asm/unaligned.h b/arch/frv/include/asm/unaligned.h
new file mode 100644
index 00000000000..6c61c05b2e0
--- /dev/null
+++ b/arch/frv/include/asm/unaligned.h
@@ -0,0 +1,22 @@
+/* unaligned.h: unaligned access handler
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNALIGNED_H
+#define _ASM_UNALIGNED_H
+
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
+
+#endif /* _ASM_UNALIGNED_H */
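
Illustrative sketch (not part of the patch): get_unaligned() reads a value through a pointer that may not be naturally aligned; on this big-endian architecture the be_struct implementation yields the native value. The helper name is hypothetical and u8/u32 are assumed to come from <linux/types.h>.

/* Hypothetical sketch: read a 32-bit value from an arbitrary buffer offset. */
static u32 frv_read_word(const u8 *buf, unsigned int offset)
{
	return get_unaligned((const u32 *)(buf + offset));
}
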
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
new file mode 100644
index 00000000000..a569dff7cd5
--- /dev/null
+++ b/arch/frv/include/asm/unistd.h
@@ -0,0 +1,388 @@
+#ifndef _ASM_UNISTD_H_
+#define _ASM_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+// #define __NR_oldolduname /* 59 */ obsolete
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+// #define __NR_mmap 90 /* obsolete - not implemented */
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+// #define __NR_profil /* 98 */ obsolete
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+// #define __NR_ioperm /* 101 */ not supported
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+// #define __NR_olduname /* 109 */ obsolete
+// #define __NR_iopl /* 110 */ not supported
+#define __NR_vhangup 111
+// #define __NR_idle /* 112 */ Obsolete
+// #define __NR_vm86old /* 113 */ not supported
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+// #define __NR_modify_ldt /* 123 */ not supported
+#define __NR_cacheflush 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+// #define __NR_vm86 /* 166 */ not supported
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188 /* some people actually want streams */
+#define __NR_putpmsg 189 /* some people actually want streams */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+
+#define __NR_getdents64 220
+#define __NR_fcntl64 221
+#define __NR_security 223 /* syscall for security modules */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_set_thread_area 243
+#define __NR_get_thread_area 244
+#define __NR_io_setup 245
+#define __NR_io_destroy 246
+#define __NR_io_getevents 247
+#define __NR_io_submit 248
+#define __NR_io_cancel 249
+#define __NR_fadvise64 250
+
+#define __NR_exit_group 252
+#define __NR_lookup_dcookie 253
+#define __NR_epoll_create 254
+#define __NR_epoll_ctl 255
+#define __NR_epoll_wait 256
+#define __NR_remap_file_pages 257
+#define __NR_set_tid_address 258
+#define __NR_timer_create 259
+#define __NR_timer_settime (__NR_timer_create+1)
+#define __NR_timer_gettime (__NR_timer_create+2)
+#define __NR_timer_getoverrun (__NR_timer_create+3)
+#define __NR_timer_delete (__NR_timer_create+4)
+#define __NR_clock_settime (__NR_timer_create+5)
+#define __NR_clock_gettime (__NR_timer_create+6)
+#define __NR_clock_getres (__NR_timer_create+7)
+#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_statfs64 268
+#define __NR_fstatfs64 269
+#define __NR_tgkill 270
+#define __NR_utimes 271
+#define __NR_fadvise64_64 272
+#define __NR_vserver 273
+#define __NR_mbind 274
+#define __NR_get_mempolicy 275
+#define __NR_set_mempolicy 276
+#define __NR_mq_open 277
+#define __NR_mq_unlink (__NR_mq_open+1)
+#define __NR_mq_timedsend (__NR_mq_open+2)
+#define __NR_mq_timedreceive (__NR_mq_open+3)
+#define __NR_mq_notify (__NR_mq_open+4)
+#define __NR_mq_getsetattr (__NR_mq_open+5)
+#define __NR_kexec_load 283
+#define __NR_waitid 284
+/* #define __NR_sys_setaltroot 285 */
+#define __NR_add_key 286
+#define __NR_request_key 287
+#define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290
+#define __NR_inotify_init 291
+#define __NR_inotify_add_watch 292
+#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
+#define __NR_openat 295
+#define __NR_mkdirat 296
+#define __NR_mknodat 297
+#define __NR_fchownat 298
+#define __NR_futimesat 299
+#define __NR_fstatat64 300
+#define __NR_unlinkat 301
+#define __NR_renameat 302
+#define __NR_linkat 303
+#define __NR_symlinkat 304
+#define __NR_readlinkat 305
+#define __NR_fchmodat 306
+#define __NR_faccessat 307
+#define __NR_pselect6 308
+#define __NR_ppoll 309
+#define __NR_unshare 310
+#define __NR_set_robust_list 311
+#define __NR_get_robust_list 312
+#define __NR_splice 313
+#define __NR_sync_file_range 314
+#define __NR_tee 315
+#define __NR_vmsplice 316
+#define __NR_move_pages 317
+#define __NR_getcpu 318
+#define __NR_epoll_pwait 319
+#define __NR_utimensat 320
+#define __NR_signalfd 321
+#define __NR_timerfd_create 322
+#define __NR_eventfd 323
+#define __NR_fallocate 324
+#define __NR_timerfd_settime 325
+#define __NR_timerfd_gettime 326
+#define __NR_signalfd4 327
+#define __NR_eventfd2 328
+#define __NR_epoll_create1 329
+#define __NR_dup3 330
+#define __NR_pipe2 331
+#define __NR_inotify_init1 332
+#define __NR_preadv 333
+#define __NR_pwritev 334
+#define __NR_rt_tgsigqueueinfo 335
+#define __NR_perf_event_open 336
+#define __NR_setns 337
+
+#ifdef __KERNEL__
+
+#define NR_syscalls 338
+
+#define __ARCH_WANT_IPC_PARSE_VERSION
+/* #define __ARCH_WANT_OLD_READDIR */
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+/* #define __ARCH_WANT_SYS_GETHOSTNAME */
+#define __ARCH_WANT_SYS_IPC
+#define __ARCH_WANT_SYS_PAUSE
+/* #define __ARCH_WANT_SYS_SGETMASK */
+/* #define __ARCH_WANT_SYS_SIGNAL */
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+/* #define __ARCH_WANT_SYS_OLD_GETRLIMIT */
+#define __ARCH_WANT_SYS_OLDUMOUNT
+/* #define __ARCH_WANT_SYS_SIGPENDING */
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_UNISTD_H_ */
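
The cond_syscall() comment above describes how an optional syscall that is not compiled in falls back to sys_ni_syscall(), which simply returns -ENOSYS. As a hedged illustration of what that looks like from user space (the test program below is illustrative only, not part of the patch, and assumes the C library exposes SYS_afs_syscall, one of the reserved slots wired to sys_ni_syscall):

	/* Calling a syscall slot that resolves to sys_ni_syscall() yields -1
	 * with errno set to ENOSYS. */
	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		long ret = syscall(SYS_afs_syscall);	/* reserved, never implemented */

		if (ret == -1 && errno == ENOSYS)
			printf("afs_syscall is unimplemented (ENOSYS), as expected\n");
		return 0;
	}
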
diff --git a/arch/frv/include/asm/user.h b/arch/frv/include/asm/user.h
new file mode 100644
index 00000000000..82fa8fab64a
--- /dev/null
+++ b/arch/frv/include/asm/user.h
@@ -0,0 +1,80 @@
+/* user.h: FR-V core file format stuff
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_USER_H
+#define _ASM_USER_H
+
+#include <asm/page.h>
+#include <asm/registers.h>
+
+/* Core file format: The core file is written in such a way that gdb
+ * can understand it and provide useful information to the user (under
+ * linux we use the 'trad-core' bfd). There are quite a number of
+ * obstacles to being able to view the contents of the floating point
+ * registers, and until these are solved you will not be able to view
+ * the contents of them. Actually, you can read in the core file and
+ * look at the contents of the user struct to find out what the
+ * floating point registers contain.
+ *
+ * The actual file contents are as follows:
+ * UPAGE:
+ * 1 page consisting of a user struct that tells gdb what is present
+ * in the file. Directly after this is a copy of the task_struct,
+ * which is currently not used by gdb, but it may come in useful at
+ * some point. All of the registers are stored as part of the
+ * upage. The upage should always be only one page.
+ *
+ * DATA:
+ * The data area is stored. We use current->end_text to
+ * current->brk to pick up all of the user variables, plus any
+ * memory that may have been malloced. No attempt is made to
+ * determine if a page is demand-zero or if a page is totally
+ * unused, we just cover the entire range. All of the addresses are
+ * rounded in such a way that an integral number of pages is
+ * written.
+ *
+ * STACK:
+ * We need the stack information in order to get a meaningful
+ * backtrace. We need to write the data from (esp) to
+ * current->start_stack, so we round each of these off in order to
+ * be able to write an integer number of pages. The minimum core
+ * file size is 3 pages, or 12288 bytes.
+ */
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ * this will be used by gdb to figure out where the data and stack segments
+ * are within the file, and what virtual addresses to use.
+ */
+struct user {
+ /* We start with the registers, to mimic the way that "memory" is returned
+ * from the ptrace(3,...) function. */
+ struct user_context regs;
+
+ /* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long u_tsize; /* Text segment size (pages). */
+ unsigned long u_dsize; /* Data segment size (pages). */
+ unsigned long u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ * This is actually the bottom of the stack,
+ * the top of the stack is always found in the
+ * esp register. */
+ long int signal; /* Signal that caused the core dump. */
+
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+};
+
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif
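
As a quick check of the "3 pages, or 12288 bytes" minimum mentioned in the comment above, here is a small sketch of the arithmetic. It assumes the 4KB page size that figure implies; the program itself is illustrative only:

	#include <stdio.h>

	#define SKETCH_PAGE_SIZE 4096UL		/* assumed PAGE_SIZE */
	#define SKETCH_UPAGES	 1UL		/* matches UPAGES above */

	int main(void)
	{
		/* one upage + at least one data page + at least one stack page */
		unsigned long min_core = (SKETCH_UPAGES + 1 + 1) * SKETCH_PAGE_SIZE;

		printf("minimum core file size: %lu bytes\n", min_core);	/* 12288 */
		return 0;
	}
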
diff --git a/arch/frv/include/asm/vga.h b/arch/frv/include/asm/vga.h
new file mode 100644
index 00000000000..a702c800a22
--- /dev/null
+++ b/arch/frv/include/asm/vga.h
@@ -0,0 +1,17 @@
+/* vga.h: VGA register stuff
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_VGA_H
+#define _ASM_VGA_H
+
+
+
+#endif /* _ASM_VGA_H */
diff --git a/arch/frv/include/asm/virtconvert.h b/arch/frv/include/asm/virtconvert.h
new file mode 100644
index 00000000000..b26d70ab911
--- /dev/null
+++ b/arch/frv/include/asm/virtconvert.h
@@ -0,0 +1,41 @@
+/* virtconvert.h: virtual/physical/page address conversion
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_VIRTCONVERT_H
+#define _ASM_VIRTCONVERT_H
+
+/*
+ * Macros used for converting between virtual and physical mappings.
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/setup.h>
+
+#ifdef CONFIG_MMU
+
+#define phys_to_virt(vaddr) ((void *) ((unsigned long)(vaddr) + PAGE_OFFSET))
+#define virt_to_phys(vaddr) ((unsigned long) (vaddr) - PAGE_OFFSET)
+
+#else
+
+#define phys_to_virt(vaddr) ((void *) (vaddr))
+#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
+
+#endif
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
+
+#endif
+#endif
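
In the CONFIG_MMU case the conversions above are a single linear offset, so virt_to_phys() and phys_to_virt() are exact inverses. A standalone sketch of that identity follows; the concrete PAGE_OFFSET value used here is an assumption for illustration, not taken from this patch:

	#include <assert.h>

	#define SKETCH_PAGE_OFFSET 0xc0000000UL		/* assumed direct-map base */

	static void *sketch_phys_to_virt(unsigned long paddr)
	{
		return (void *)(paddr + SKETCH_PAGE_OFFSET);
	}

	static unsigned long sketch_virt_to_phys(const void *vaddr)
	{
		return (unsigned long)vaddr - SKETCH_PAGE_OFFSET;
	}

	int main(void)
	{
		unsigned long paddr = 0x01000000UL;	/* arbitrary physical address */

		assert(sketch_virt_to_phys(sketch_phys_to_virt(paddr)) == paddr);
		return 0;
	}
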
diff --git a/arch/frv/include/asm/xor.h b/arch/frv/include/asm/xor.h
new file mode 100644
index 00000000000..c82eb12a5b1
--- /dev/null
+++ b/arch/frv/include/asm/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
new file mode 100644
index 00000000000..c36f70b6699
--- /dev/null
+++ b/arch/frv/kernel/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for the linux kernel.
+#
+
+heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o
+heads-$(CONFIG_MMU) := head-mmu-fr451.o
+
+extra-y := head.o init_task.o vmlinux.lds
+
+obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
+ kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
+ sys_frv.o time.o setup.o frv_ksyms.o \
+ debug-stub.o irq.o sleep.o uaccess.o
+
+obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
+
+obj-$(CONFIG_MB93091_VDK) += irq-mb93091.o
+obj-$(CONFIG_PM) += pm.o cmode.o
+obj-$(CONFIG_MB93093_PDK) += pm-mb93093.o
+obj-$(CONFIG_FUJITSU_MB93493) += irq-mb93493.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_FUTEX) += futex.o
+obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
new file mode 100644
index 00000000000..9de96843a27
--- /dev/null
+++ b/arch/frv/kernel/asm-offsets.c
@@ -0,0 +1,108 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/kbuild.h>
+#include <asm/registers.h>
+#include <asm/ucontext.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/gdb-stub.h>
+
+#define DEF_PTREG(sym, reg) \
+ asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \
+ : : "i" (offsetof(struct pt_regs, reg)))
+
+#define DEF_IREG(sym, reg) \
+ asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
+ : : "i" (offsetof(struct user_context, reg)))
+
+#define DEF_FREG(sym, reg) \
+ asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
+ : : "i" (offsetof(struct user_context, reg)))
+
+#define DEF_0REG(sym, reg) \
+ asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \
+ : : "i" (offsetof(struct frv_frame0, reg)))
+
+void foo(void)
+{
+ /* offsets into the thread_info structure */
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_STATUS, thread_info, status);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+ OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
+ BLANK();
+
+ /* offsets into register file storage */
+ DEF_PTREG(REG_PSR, psr);
+ DEF_PTREG(REG_ISR, isr);
+ DEF_PTREG(REG_CCR, ccr);
+ DEF_PTREG(REG_CCCR, cccr);
+ DEF_PTREG(REG_LR, lr);
+ DEF_PTREG(REG_LCR, lcr);
+ DEF_PTREG(REG_PC, pc);
+ DEF_PTREG(REG__STATUS, __status);
+ DEF_PTREG(REG_SYSCALLNO, syscallno);
+ DEF_PTREG(REG_ORIG_GR8, orig_gr8);
+ DEF_PTREG(REG_GNER0, gner0);
+ DEF_PTREG(REG_GNER1, gner1);
+ DEF_PTREG(REG_IACC0, iacc0);
+ DEF_PTREG(REG_TBR, tbr);
+ DEF_PTREG(REG_GR0, tbr);
+ DEFINE(REG__END, sizeof(struct pt_regs));
+ BLANK();
+
+ DEF_0REG(REG_DCR, debug.dcr);
+ DEF_0REG(REG_IBAR0, debug.ibar[0]);
+ DEF_0REG(REG_DBAR0, debug.dbar[0]);
+ DEF_0REG(REG_DBDR00, debug.dbdr[0][0]);
+ DEF_0REG(REG_DBMR00, debug.dbmr[0][0]);
+ BLANK();
+
+ DEF_IREG(__INT_GR0, i.gr[0]);
+ DEF_FREG(__USER_FPMEDIA, f);
+ DEF_FREG(__FPMEDIA_FR0, f.fr[0]);
+ DEF_FREG(__FPMEDIA_FNER0, f.fner[0]);
+ DEF_FREG(__FPMEDIA_MSR0, f.msr[0]);
+ DEF_FREG(__FPMEDIA_ACC0, f.acc[0]);
+ DEF_FREG(__FPMEDIA_ACCG0, f.accg[0]);
+ DEF_FREG(__FPMEDIA_FSR0, f.fsr[0]);
+ BLANK();
+
+ DEFINE(NR_PT_REGS, sizeof(struct pt_regs) / 4);
+ DEFINE(NR_USER_INT_REGS, sizeof(struct user_int_regs) / 4);
+ DEFINE(NR_USER_FPMEDIA_REGS, sizeof(struct user_fpmedia_regs) / 4);
+ DEFINE(NR_USER_CONTEXT, sizeof(struct user_context) / 4);
+ DEFINE(FRV_FRAME0_SIZE, sizeof(struct frv_frame0));
+ BLANK();
+
+ /* offsets into thread_struct */
+ OFFSET(__THREAD_FRAME, thread_struct, frame);
+ OFFSET(__THREAD_CURR, thread_struct, curr);
+ OFFSET(__THREAD_SP, thread_struct, sp);
+ OFFSET(__THREAD_FP, thread_struct, fp);
+ OFFSET(__THREAD_LR, thread_struct, lr);
+ OFFSET(__THREAD_PC, thread_struct, pc);
+ OFFSET(__THREAD_GR16, thread_struct, gr[0]);
+ OFFSET(__THREAD_SCHED_LR, thread_struct, sched_lr);
+ OFFSET(__THREAD_FRAME0, thread_struct, frame0);
+ OFFSET(__THREAD_USER, thread_struct, user);
+ BLANK();
+
+ /* offsets into frv_debug_status */
+ OFFSET(DEBUG_BPSR, frv_debug_status, bpsr);
+ OFFSET(DEBUG_DCR, frv_debug_status, dcr);
+ OFFSET(DEBUG_BRR, frv_debug_status, brr);
+ OFFSET(DEBUG_NMAR, frv_debug_status, nmar);
+ BLANK();
+}
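
asm-offsets.c relies on the compiler folding each offsetof() into an immediate in the generated assembly; kbuild then rewrites every "->NAME value" marker line into a #define in the generated header. A reduced sketch of the same trick, using an invented structure and symbol name (compile with -S and look for the "->" markers in the output):

	#include <stddef.h>

	struct sketch_ti {
		void *task;
		unsigned long flags;
	};

	/* emit "->SYM value" into the assembly output; the kbuild sed script
	 * turns each such marker into "#define SYM value" */
	#define SKETCH_DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	void sketch_offsets(void)
	{
		SKETCH_DEFINE(SKETCH_TI_FLAGS, offsetof(struct sketch_ti, flags));
	}
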
diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S
new file mode 100644
index 00000000000..cbb6958a314
--- /dev/null
+++ b/arch/frv/kernel/break.S
@@ -0,0 +1,792 @@
+/* break.S: Break interrupt handling (kept separate from entry.S)
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/spr-regs.h>
+
+#include <asm/errno.h>
+
+#
+# the break handler has its own stack
+#
+ .section .bss..stack
+ .globl __break_user_context
+ .balign THREAD_SIZE
+__break_stack:
+ .space THREAD_SIZE - FRV_FRAME0_SIZE
+__break_frame_0:
+ .space FRV_FRAME0_SIZE
+
+#
+# miscellaneous variables
+#
+ .section .bss
+#ifdef CONFIG_MMU
+ .globl __break_tlb_miss_real_return_info
+__break_tlb_miss_real_return_info:
+ .balign 8
+ .space 2*4 /* saved PCSR, PSR for TLB-miss handler fixup */
+#endif
+
+__break_trace_through_exceptions:
+ .space 4
+
+#define CS2_ECS1 0xe1200000
+#define CS2_USERLED 0x4
+
+.macro LEDS val,reg
+# sethi.p %hi(CS2_ECS1+CS2_USERLED),gr30
+# setlo %lo(CS2_ECS1+CS2_USERLED),gr30
+# setlos #~\val,\reg
+# st \reg,@(gr30,gr0)
+# setlos #0x5555,\reg
+# sethi.p %hi(0xffc00100),gr30
+# setlo %lo(0xffc00100),gr30
+# sth \reg,@(gr30,gr0)
+# membar
+.endm
+
+###############################################################################
+#
+# entry point for Break Exceptions/Interrupts
+#
+###############################################################################
+ .section .text..break
+ .balign 4
+ .globl __entry_break
+__entry_break:
+#ifdef CONFIG_MMU
+ movgs gr31,scr3
+#endif
+ LEDS 0x1001,gr31
+
+ sethi.p %hi(__break_frame_0),gr31
+ setlo %lo(__break_frame_0),gr31
+
+ stdi gr2,@(gr31,#REG_GR(2))
+ movsg ccr,gr3
+ sti gr3,@(gr31,#REG_CCR)
+
+ # catch the return from a TLB-miss handler that had single-step disabled
+ # traps will be enabled, so we have to do this now
+#ifdef CONFIG_MMU
+ movsg bpcsr,gr3
+ sethi.p %hi(__break_tlb_miss_return_breaks_here),gr2
+ setlo %lo(__break_tlb_miss_return_breaks_here),gr2
+ subcc gr2,gr3,gr0,icc0
+ beq icc0,#2,__break_return_singlestep_tlbmiss
+#endif
+
+ # determine whether we have stepped through into an exception
+ # - we need to take special action to suspend h/w single stepping if we've done
+ # that, so that the gdbstub doesn't get bogged down endlessly stepping through
+ # external interrupt handling
+ movsg bpsr,gr3
+ andicc gr3,#BPSR_BET,gr0,icc0
+ bne icc0,#2,__break_maybe_userspace /* jump if PSR.ET was 1 */
+
+ LEDS 0x1003,gr2
+
+ movsg brr,gr3
+ andicc gr3,#BRR_ST,gr0,icc0
+ andicc.p gr3,#BRR_SB,gr0,icc1
+ bne icc0,#2,__break_step /* jump if single-step caused break */
+ beq icc1,#2,__break_continue /* jump if BREAK didn't cause break */
+
+ LEDS 0x1007,gr2
+
+ # handle special breaks
+ movsg bpcsr,gr3
+
+ sethi.p %hi(__entry_return_singlestep_breaks_here),gr2
+ setlo %lo(__entry_return_singlestep_breaks_here),gr2
+ subcc gr2,gr3,gr0,icc0
+ beq icc0,#2,__break_return_singlestep
+
+ bra __break_continue
+
+
+###############################################################################
+#
+# handle BREAK instruction in kernel-mode exception epilogue
+#
+###############################################################################
+__break_return_singlestep:
+ LEDS 0x100f,gr2
+
+ # special break insn requests single-stepping to be turned back on
+	#		HERE		RETT
+	# PSR.ET	0		0
+	# PSR.PS	old PSR.S	?
+	# PSR.S		1		1
+	# BPSR.ET	0		1		(can't have caused orig excep otherwise)
+	# BPSR.BS	1		old PSR.S
+ movsg dcr,gr2
+ sethi.p %hi(DCR_SE),gr3
+ setlo %lo(DCR_SE),gr3
+ or gr2,gr3,gr2
+ movgs gr2,dcr
+
+ movsg psr,gr2
+ andi gr2,#PSR_PS,gr2
+ slli gr2,#11,gr2 /* PSR.PS -> BPSR.BS */
+ ori gr2,#BPSR_BET,gr2 /* 1 -> BPSR.BET */
+ movgs gr2,bpsr
+
+ # return to the invoker of the original kernel exception
+ movsg pcsr,gr2
+ movgs gr2,bpcsr
+
+ LEDS 0x101f,gr2
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ lddi.p @(gr31,#REG_GR(2)),gr2
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+###############################################################################
+#
+# handle BREAK instruction in TLB-miss handler return path
+#
+###############################################################################
+#ifdef CONFIG_MMU
+__break_return_singlestep_tlbmiss:
+ LEDS 0x1100,gr2
+
+ sethi.p %hi(__break_tlb_miss_real_return_info),gr3
+ setlo %lo(__break_tlb_miss_real_return_info),gr3
+ lddi @(gr3,#0),gr2
+ movgs gr2,pcsr
+ movgs gr3,psr
+
+ bra __break_return_singlestep
+#endif
+
+
+###############################################################################
+#
+# handle single stepping into an exception prologue from kernel mode
+# - we try and catch it whilst it is still in the main vector table
+# - if we catch it there, we have to jump to the fixup handler
+# - there is a fixup table that has a pointer for every 16-byte slot in the
+#   trap table
+#
+###############################################################################
+__break_step:
+ LEDS 0x2003,gr2
+
+ # external interrupts seem to escape from the trap table before single
+ # step catches up with them
+ movsg bpcsr,gr2
+ sethi.p %hi(__entry_kernel_external_interrupt),gr3
+ setlo %lo(__entry_kernel_external_interrupt),gr3
+ subcc.p gr2,gr3,gr0,icc0
+ sethi %hi(__entry_uspace_external_interrupt),gr3
+ setlo.p %lo(__entry_uspace_external_interrupt),gr3
+ beq icc0,#2,__break_step_kernel_external_interrupt
+ subcc.p gr2,gr3,gr0,icc0
+ sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3
+ setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3
+ beq icc0,#2,__break_step_uspace_external_interrupt
+ subcc.p gr2,gr3,gr0,icc0
+ sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3
+ setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3
+ beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled
+ subcc gr2,gr3,gr0,icc0
+ beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable
+
+ LEDS 0x2007,gr2
+
+ # the two main vector tables are adjacent on one 8Kb slab
+ movsg bpcsr,gr2
+ setlos #0xffffe000,gr3
+ and gr2,gr3,gr2
+ sethi.p %hi(__trap_tables),gr3
+ setlo %lo(__trap_tables),gr3
+ subcc gr2,gr3,gr0,icc0
+ bne icc0,#2,__break_continue
+
+ LEDS 0x200f,gr2
+
+ # skip workaround if so requested by GDB
+ sethi.p %hi(__break_trace_through_exceptions),gr3
+ setlo %lo(__break_trace_through_exceptions),gr3
+ ld @(gr3,gr0),gr3
+ subcc gr3,gr0,gr0,icc0
+ bne icc0,#0,__break_continue
+
+ LEDS 0x201f,gr2
+
+ # access the fixup table - there's a 1:1 mapping between the slots in the trap tables and
+ # the slots in the trap fixup tables allowing us to simply divide the offset into the
+ # former by 4 to access the latter
+ sethi.p %hi(__trap_tables),gr3
+ setlo %lo(__trap_tables),gr3
+ movsg bpcsr,gr2
+ sub gr2,gr3,gr2
+ srli.p gr2,#2,gr2
+
+ sethi %hi(__trap_fixup_tables),gr3
+ setlo.p %lo(__trap_fixup_tables),gr3
+ andi gr2,#~3,gr2
+ ld @(gr2,gr3),gr2
+ jmpil @(gr2,#0)
+
+# step through an internal exception from kernel mode
+ .globl __break_step_kernel_softprog_interrupt
+__break_step_kernel_softprog_interrupt:
+ sethi.p %hi(__entry_kernel_softprog_interrupt_reentry),gr3
+ setlo %lo(__entry_kernel_softprog_interrupt_reentry),gr3
+ bra __break_return_as_kernel_prologue
+
+# step through an external interrupt from kernel mode
+ .globl __break_step_kernel_external_interrupt
+__break_step_kernel_external_interrupt:
+ # deal with virtual interrupt disablement
+ beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled
+
+ sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3
+ setlo %lo(__entry_kernel_external_interrupt_reentry),gr3
+
+__break_return_as_kernel_prologue:
+ LEDS 0x203f,gr2
+
+ movgs gr3,bpcsr
+
+ # do the bit we had to skip
+#ifdef CONFIG_MMU
+ movsg ear0,gr2 /* EAR0 can get clobbered by gdb-stub (ICI/ICEI) */
+ movgs gr2,scr2
+#endif
+
+ or.p sp,gr0,gr2 /* set up the stack pointer */
+ subi sp,#REG__END,sp
+ sti.p gr2,@(sp,#REG_SP)
+
+ setlos #REG__STATUS_STEP,gr2
+ sti gr2,@(sp,#REG__STATUS) /* record single step status */
+
+ # cancel single-stepping mode
+ movsg dcr,gr2
+ sethi.p %hi(~DCR_SE),gr3
+ setlo %lo(~DCR_SE),gr3
+ and gr2,gr3,gr2
+ movgs gr2,dcr
+
+ LEDS 0x207f,gr2
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ lddi.p @(gr31,#REG_GR(2)),gr2
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+# we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled
+# need to really disable interrupts, set flag, fix up and return
+__break_step_kernel_external_interrupt_virtually_disabled:
+ movsg psr,gr2
+ andi gr2,#~PSR_PIL,gr2
+ ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */
+ movgs gr2,psr
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */
+
+ # exceptions must've been enabled and we must've been in supervisor mode
+ setlos BPSR_BET|BPSR_BS,gr3
+ movgs gr3,bpsr
+
+ # return to where the interrupt happened
+ movsg pcsr,gr2
+ movgs gr2,bpcsr
+
+ lddi.p @(gr31,#REG_GR(2)),gr2
+
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+# we stepped through into the virtual interrupt reenablement trap
+#
+# we also want to single step anyway, but after fixing up so that we get an event on the
+# instruction after the broken-into exception returns
+ .globl __break_step_kernel_external_interrupt_virtual_reenable
+__break_step_kernel_external_interrupt_virtual_reenable:
+ movsg psr,gr2
+ andi gr2,#~PSR_PIL,gr2
+ movgs gr2,psr
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ subicc gr0,#1,gr0,icc2 /* clear Z, set C */
+
+ # save the adjusted ICC2
+ movsg ccr,gr3
+ sti gr3,@(gr31,#REG_CCR)
+
+ # exceptions must've been enabled and we must've been in supervisor mode
+ setlos BPSR_BET|BPSR_BS,gr3
+ movgs gr3,bpsr
+
+ # return to where the trap happened
+ movsg pcsr,gr2
+ movgs gr2,bpcsr
+
+ # and then process the single step
+ bra __break_continue
+
+# step through an internal exception from uspace mode
+ .globl __break_step_uspace_softprog_interrupt
+__break_step_uspace_softprog_interrupt:
+ sethi.p %hi(__entry_uspace_softprog_interrupt_reentry),gr3
+ setlo %lo(__entry_uspace_softprog_interrupt_reentry),gr3
+ bra __break_return_as_uspace_prologue
+
+# step through an external interrupt from kernel mode
+ .globl __break_step_uspace_external_interrupt
+__break_step_uspace_external_interrupt:
+ sethi.p %hi(__entry_uspace_external_interrupt_reentry),gr3
+ setlo %lo(__entry_uspace_external_interrupt_reentry),gr3
+
+__break_return_as_uspace_prologue:
+ LEDS 0x20ff,gr2
+
+ movgs gr3,bpcsr
+
+ # do the bit we had to skip
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ ldi.p @(gr28,#0),gr28
+
+ setlos #REG__STATUS_STEP,gr2
+ sti gr2,@(gr28,#REG__STATUS) /* record single step status */
+
+ # cancel single-stepping mode
+ movsg dcr,gr2
+ sethi.p %hi(~DCR_SE),gr3
+ setlo %lo(~DCR_SE),gr3
+ and gr2,gr3,gr2
+ movgs gr2,dcr
+
+ LEDS 0x20fe,gr2
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ lddi.p @(gr31,#REG_GR(2)),gr2
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+#ifdef CONFIG_MMU
+# step through an ITLB-miss handler from user mode
+ .globl __break_user_insn_tlb_miss
+__break_user_insn_tlb_miss:
+ # we'll want to try the trap stub again
+ sethi.p %hi(__trap_user_insn_tlb_miss),gr2
+ setlo %lo(__trap_user_insn_tlb_miss),gr2
+ movgs gr2,bpcsr
+
+__break_tlb_miss_common:
+ LEDS 0x2101,gr2
+
+ # cancel single-stepping mode
+ movsg dcr,gr2
+ sethi.p %hi(~DCR_SE),gr3
+ setlo %lo(~DCR_SE),gr3
+ and gr2,gr3,gr2
+ movgs gr2,dcr
+
+ # we'll swap the real return address for one with a BREAK insn so that we can re-enable
+ # single stepping on return
+ movsg pcsr,gr2
+ sethi.p %hi(__break_tlb_miss_real_return_info),gr3
+ setlo %lo(__break_tlb_miss_real_return_info),gr3
+ sti gr2,@(gr3,#0)
+
+ sethi.p %hi(__break_tlb_miss_return_break),gr2
+ setlo %lo(__break_tlb_miss_return_break),gr2
+ movgs gr2,pcsr
+
+ # we also have to fudge PSR because the return BREAK is in kernel space and we want
+ # to get a BREAK fault not an access violation should the return be to userspace
+ movsg psr,gr2
+ sti.p gr2,@(gr3,#4)
+ ori gr2,#PSR_PS,gr2
+ movgs gr2,psr
+
+ LEDS 0x2102,gr2
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ lddi @(gr31,#REG_GR(2)),gr2
+ movsg scr3,gr31
+ movgs gr0,brr
+ rett #1
+
+# step through a DTLB-miss handler from user mode
+ .globl __break_user_data_tlb_miss
+__break_user_data_tlb_miss:
+ # we'll want to try the trap stub again
+ sethi.p %hi(__trap_user_data_tlb_miss),gr2
+ setlo %lo(__trap_user_data_tlb_miss),gr2
+ movgs gr2,bpcsr
+ bra __break_tlb_miss_common
+
+# step through an ITLB-miss handler from kernel mode
+ .globl __break_kernel_insn_tlb_miss
+__break_kernel_insn_tlb_miss:
+ # we'll want to try the trap stub again
+ sethi.p %hi(__trap_kernel_insn_tlb_miss),gr2
+ setlo %lo(__trap_kernel_insn_tlb_miss),gr2
+ movgs gr2,bpcsr
+ bra __break_tlb_miss_common
+
+# step through a DTLB-miss handler from kernel mode
+ .globl __break_kernel_data_tlb_miss
+__break_kernel_data_tlb_miss:
+ # we'll want to try the trap stub again
+ sethi.p %hi(__trap_kernel_data_tlb_miss),gr2
+ setlo %lo(__trap_kernel_data_tlb_miss),gr2
+ movgs gr2,bpcsr
+ bra __break_tlb_miss_common
+#endif
+
+###############################################################################
+#
+# handle debug events originating with userspace
+#
+###############################################################################
+__break_maybe_userspace:
+ LEDS 0x3003,gr2
+
+ setlos #BPSR_BS,gr2
+ andcc gr3,gr2,gr0,icc0
+ bne icc0,#0,__break_continue /* skip if PSR.S was 1 */
+
+ movsg brr,gr2
+ andicc gr2,#BRR_ST|BRR_SB,gr0,icc0
+ beq icc0,#0,__break_continue /* jump if not BREAK or single-step */
+
+ LEDS 0x3007,gr2
+
+ # do the first part of the exception prologue here
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ ldi @(gr28,#0),gr28
+ andi gr28,#~7,gr28
+
+ # set up the kernel stack pointer
+ sti sp ,@(gr28,#REG_SP)
+ ori gr28,0,sp
+ sti gr0 ,@(gr28,#REG_GR(28))
+
+ stdi gr20,@(gr28,#REG_GR(20))
+ stdi gr22,@(gr28,#REG_GR(22))
+
+ movsg tbr,gr20
+ movsg bpcsr,gr21
+ movsg psr,gr22
+
+ # determine the exception type and cancel single-stepping mode
+ or gr0,gr0,gr23
+
+ movsg dcr,gr2
+ sethi.p %hi(DCR_SE),gr3
+ setlo %lo(DCR_SE),gr3
+ andcc gr2,gr3,gr0,icc0
+ beq icc0,#0,__break_no_user_sstep /* must have been a BREAK insn */
+
+ not gr3,gr3
+ and gr2,gr3,gr2
+ movgs gr2,dcr
+ ori gr23,#REG__STATUS_STEP,gr23
+
+__break_no_user_sstep:
+ LEDS 0x300f,gr2
+
+ movsg brr,gr2
+ andi gr2,#BRR_ST|BRR_SB,gr2
+ slli gr2,#1,gr2
+ or gr23,gr2,gr23
+ sti.p gr23,@(gr28,#REG__STATUS) /* record single step status */
+
+ # adjust the value acquired from TBR - this indicates the exception
+ setlos #~TBR_TT,gr2
+ and.p gr20,gr2,gr20
+ setlos #TBR_TT_BREAK,gr2
+ or.p gr20,gr2,gr20
+
+ # fudge PSR.PS and BPSR.BS to return to kernel mode through the trap
+ # table as trap 126
+ andi gr22,#~PSR_PS,gr22 /* PSR.PS should be 0 */
+ movgs gr22,psr
+
+ setlos #BPSR_BS,gr2 /* BPSR.BS should be 1 and BPSR.BET 0 */
+ movgs gr2,bpsr
+
+ # return through remainder of the exception prologue
+ # - need to load gr23 with return handler address
+ sethi.p %hi(__entry_return_from_user_exception),gr23
+ setlo %lo(__entry_return_from_user_exception),gr23
+ sethi.p %hi(__entry_common),gr3
+ setlo %lo(__entry_common),gr3
+ movgs gr3,bpcsr
+
+ LEDS 0x301f,gr2
+
+ ldi @(gr31,#REG_CCR),gr3
+ movgs gr3,ccr
+ lddi.p @(gr31,#REG_GR(2)),gr2
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+###############################################################################
+#
+# resume normal debug-mode entry
+#
+###############################################################################
+__break_continue:
+ LEDS 0x4003,gr2
+
+ # set up the kernel stack pointer
+ sti sp,@(gr31,#REG_SP)
+
+ sethi.p %hi(__break_frame_0),sp
+ setlo %lo(__break_frame_0),sp
+
+ # finish building the exception frame
+ stdi gr4 ,@(gr31,#REG_GR(4))
+ stdi gr6 ,@(gr31,#REG_GR(6))
+ stdi gr8 ,@(gr31,#REG_GR(8))
+ stdi gr10,@(gr31,#REG_GR(10))
+ stdi gr12,@(gr31,#REG_GR(12))
+ stdi gr14,@(gr31,#REG_GR(14))
+ stdi gr16,@(gr31,#REG_GR(16))
+ stdi gr18,@(gr31,#REG_GR(18))
+ stdi gr20,@(gr31,#REG_GR(20))
+ stdi gr22,@(gr31,#REG_GR(22))
+ stdi gr24,@(gr31,#REG_GR(24))
+ stdi gr26,@(gr31,#REG_GR(26))
+ sti gr0 ,@(gr31,#REG_GR(28)) /* NULL frame pointer */
+ sti gr29,@(gr31,#REG_GR(29))
+ sti gr30,@(gr31,#REG_GR(30))
+ sti gr8 ,@(gr31,#REG_ORIG_GR8)
+
+#ifdef CONFIG_MMU
+ movsg scr3,gr19
+ sti gr19,@(gr31,#REG_GR(31))
+#endif
+
+ movsg bpsr ,gr19
+ movsg tbr ,gr20
+ movsg bpcsr,gr21
+ movsg psr ,gr22
+ movsg isr ,gr23
+ movsg cccr ,gr25
+ movsg lr ,gr26
+ movsg lcr ,gr27
+
+ andi.p gr22,#~(PSR_S|PSR_ET),gr5 /* rebuild PSR */
+ andi gr19,#PSR_ET,gr4
+ or.p gr4,gr5,gr5
+ srli gr19,#10,gr4
+ andi gr4,#PSR_S,gr4
+ or.p gr4,gr5,gr5
+
+ setlos #-1,gr6
+ sti gr20,@(gr31,#REG_TBR)
+ sti gr21,@(gr31,#REG_PC)
+ sti gr5 ,@(gr31,#REG_PSR)
+ sti gr23,@(gr31,#REG_ISR)
+ sti gr25,@(gr31,#REG_CCCR)
+ stdi gr26,@(gr31,#REG_LR)
+ sti gr6 ,@(gr31,#REG_SYSCALLNO)
+
+ # store CPU-specific regs
+ movsg iacc0h,gr4
+ movsg iacc0l,gr5
+ stdi gr4,@(gr31,#REG_IACC0)
+
+ movsg gner0,gr4
+ movsg gner1,gr5
+ stdi gr4,@(gr31,#REG_GNER0)
+
+ # build the debug register frame
+ movsg brr,gr4
+ movgs gr0,brr
+ movsg nmar,gr5
+ movsg dcr,gr6
+
+ sethi.p %hi(__debug_status),gr7
+ setlo %lo(__debug_status),gr7
+
+ stdi gr4 ,@(gr7,#DEBUG_BRR)
+ sti gr19,@(gr7,#DEBUG_BPSR)
+ sti.p gr6 ,@(gr7,#DEBUG_DCR)
+
+ # trap exceptions during break handling and disable h/w breakpoints/watchpoints
+ sethi %hi(DCR_EBE),gr5
+ setlo.p %lo(DCR_EBE),gr5
+ sethi %hi(__entry_breaktrap_table),gr4
+ setlo %lo(__entry_breaktrap_table),gr4
+ movgs gr5,dcr
+ movgs gr4,tbr
+
+ # set up kernel global registers
+ sethi.p %hi(__kernel_current_task),gr5
+ setlo %lo(__kernel_current_task),gr5
+ ld @(gr5,gr0),gr29
+ ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
+
+ sethi %hi(_gp),gr16
+ setlo.p %lo(_gp),gr16
+
+ # make sure we (the kernel) get div-zero and misalignment exceptions
+ setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
+ movgs gr5,isr
+
+ # enter the GDB stub
+ LEDS 0x4007,gr2
+
+ or.p gr0,gr0,fp
+ call debug_stub
+
+ LEDS 0x403f,gr2
+
+ # return from break
+ lddi @(gr31,#REG_IACC0),gr4
+ movgs gr4,iacc0h
+ movgs gr5,iacc0l
+
+ lddi @(gr31,#REG_GNER0),gr4
+ movgs gr4,gner0
+ movgs gr5,gner1
+
+ lddi @(gr31,#REG_LR) ,gr26
+ lddi @(gr31,#REG_CCR) ,gr24
+ lddi @(gr31,#REG_PSR) ,gr22
+ ldi @(gr31,#REG_PC) ,gr21
+ ldi @(gr31,#REG_TBR) ,gr20
+
+ sethi.p %hi(__debug_status),gr6
+ setlo %lo(__debug_status),gr6
+ ldi.p @(gr6,#DEBUG_DCR) ,gr6
+
+ andi gr22,#PSR_S,gr19 /* rebuild BPSR */
+ andi.p gr22,#PSR_ET,gr5
+ slli gr19,#10,gr19
+ or gr5,gr19,gr19
+
+ movgs gr6 ,dcr
+ movgs gr19,bpsr
+ movgs gr20,tbr
+ movgs gr21,bpcsr
+ movgs gr23,isr
+ movgs gr24,ccr
+ movgs gr25,cccr
+ movgs gr26,lr
+ movgs gr27,lcr
+
+ LEDS 0x407f,gr2
+
+#ifdef CONFIG_MMU
+ ldi @(gr31,#REG_GR(31)),gr2
+ movgs gr2,scr3
+#endif
+
+ ldi @(gr31,#REG_GR(30)),gr30
+ ldi @(gr31,#REG_GR(29)),gr29
+ lddi @(gr31,#REG_GR(26)),gr26
+ lddi @(gr31,#REG_GR(24)),gr24
+ lddi @(gr31,#REG_GR(22)),gr22
+ lddi @(gr31,#REG_GR(20)),gr20
+ lddi @(gr31,#REG_GR(18)),gr18
+ lddi @(gr31,#REG_GR(16)),gr16
+ lddi @(gr31,#REG_GR(14)),gr14
+ lddi @(gr31,#REG_GR(12)),gr12
+ lddi @(gr31,#REG_GR(10)),gr10
+ lddi @(gr31,#REG_GR(8)) ,gr8
+ lddi @(gr31,#REG_GR(6)) ,gr6
+ lddi @(gr31,#REG_GR(4)) ,gr4
+ lddi @(gr31,#REG_GR(2)) ,gr2
+ ldi.p @(gr31,#REG_SP) ,sp
+
+ xor gr31,gr31,gr31
+ movgs gr0,brr
+#ifdef CONFIG_MMU
+ movsg scr3,gr31
+#endif
+ rett #1
+
+###################################################################################################
+#
+# GDB stub "system calls"
+#
+###################################################################################################
+
+#ifdef CONFIG_GDBSTUB
+ # void gdbstub_console_write(struct console *con, const char *p, unsigned n)
+ .globl gdbstub_console_write
+gdbstub_console_write:
+ break
+ bralr
+#endif
+
+ # GDB stub BUG() trap
+ # GR8 is the proposed signal number
+ .globl __debug_bug_trap
+__debug_bug_trap:
+ break
+ bralr
+
+	# transfer kernel exception to GDB for handling
+ .globl __break_hijack_kernel_event
+__break_hijack_kernel_event:
+ break
+ .globl __break_hijack_kernel_event_breaks_here
+__break_hijack_kernel_event_breaks_here:
+ nop
+
+#ifdef CONFIG_MMU
+ # handle a return from TLB-miss that requires single-step reactivation
+ .globl __break_tlb_miss_return_break
+__break_tlb_miss_return_break:
+ break
+__break_tlb_miss_return_breaks_here:
+ nop
+#endif
+
+ # guard the first .text label in the next file from confusion
+ nop
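
The __break_step path above converts a break address inside the 8KB trap tables into an index into the fixup tables: trap-table slots are 16 bytes apart, fixup entries are 4-byte pointers, and the assembly computes the fixup byte offset as (offset >> 2) & ~3. A small C sketch of that arithmetic, with invented test offsets:

	#include <assert.h>
	#include <stdint.h>

	/* one 4-byte fixup pointer per 16-byte trap-table slot */
	static uint32_t fixup_byte_offset(uint32_t trap_byte_offset)
	{
		return (trap_byte_offset >> 2) & ~3u;	/* == slot index * 4 */
	}

	int main(void)
	{
		assert(fixup_byte_offset(0x00) == 0x00);	/* first slot  -> pointer 0 */
		assert(fixup_byte_offset(0x10) == 0x04);	/* second slot -> pointer 1 */
		assert(fixup_byte_offset(0x1c) == 0x04);	/* same slot, same pointer */
		return 0;
	}
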
diff --git a/arch/frv/kernel/cmode.S b/arch/frv/kernel/cmode.S
new file mode 100644
index 00000000000..53deeb5d7e8
--- /dev/null
+++ b/arch/frv/kernel/cmode.S
@@ -0,0 +1,189 @@
+/* cmode.S: clock mode management
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Woodhouse (dwmw2@infradead.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/errno.h>
+#include <asm/cache.h>
+#include <asm/spr-regs.h>
+
+#define __addr_MASK 0xfeff9820 /* interrupt controller mask */
+
+#define __addr_SDRAMC 0xfe000400 /* SDRAM controller regs */
+#define SDRAMC_DSTS 0x28 /* SDRAM status */
+#define SDRAMC_DSTS_SSI 0x00000001 /* indicates that the SDRAM is in self-refresh mode */
+#define SDRAMC_DRCN 0x30 /* SDRAM refresh control */
+#define SDRAMC_DRCN_SR 0x00000001 /* transition SDRAM into self-refresh mode */
+#define __addr_CLKC 0xfeff9a00
+#define CLKC_SWCMODE 0x00000008
+#define __addr_LEDS 0xe1200004
+
+.macro li v r
+ sethi.p %hi(\v),\r
+ setlo %lo(\v),\r
+.endm
+
+ .text
+ .balign 4
+
+
+###############################################################################
+#
+# Change CMODE
+# - void frv_change_cmode(int cmode)
+#
+###############################################################################
+ .globl frv_change_cmode
+ .type frv_change_cmode,@function
+
+.macro LEDS v
+#ifdef DEBUG_CMODE
+ setlos #~\v,gr10
+ sti gr10,@(gr11,#0)
+ membar
+#endif
+.endm
+
+frv_change_cmode:
+ movsg lr,gr9
+#ifdef DEBUG_CMODE
+ li __addr_LEDS,gr11
+#endif
+ dcef @(gr0,gr0),#1
+
+ # Shift argument left by 24 bits to fit in SWCMODE register later.
+ slli gr8,#24,gr8
+
+ # (1) Set '0' in the PSR.ET bit, and prohibit interrupts.
+ movsg psr,gr14
+ andi gr14,#~PSR_ET,gr3
+ movgs gr3,psr
+
+#if 0 // Fujitsu recommends skipping this step and will update the docs.
+ # (2) Set '0' to all bits of the MASK register of the interrupt
+ # controller, and mask interrupts.
+ li __addr_MASK,gr12
+ ldi @(gr12,#0),gr13
+ li 0xffff0000,gr4
+ sti gr4,@(gr12,#0)
+#endif
+
+ # (3) Stop the transfer function of DMAC. Stop all the bus masters
+ # to access SDRAM and the internal resources.
+
+ # (already done by caller)
+
+ # (4) Preload a series of following instructions to the instruction
+ # cache.
+ li #__cmode_icache_lock_start,gr3
+ li #__cmode_icache_lock_end,gr4
+
+1: icpl gr3,gr0,#1
+ addi gr3,#L1_CACHE_BYTES,gr3
+ cmp gr4,gr3,icc0
+ bhi icc0,#0,1b
+
+ # Set up addresses in regs for later steps.
+ setlos SDRAMC_DRCN_SR,gr3
+ li __addr_SDRAMC,gr4
+ li __addr_CLKC,gr5
+ ldi @(gr5,#0),gr6
+ li #0x80000000,gr7
+ or gr6,gr7,gr6
+
+ bra __cmode_icache_lock_start
+
+ .balign L1_CACHE_BYTES
+__cmode_icache_lock_start:
+
+ # (5) Flush the content of all caches by the DCEF instruction.
+ dcef @(gr0,gr0),#1
+
+ # (6) Execute loading the dummy for SDRAM.
+ ldi @(gr9,#0),gr0
+
+ # (7) Set '1' to the DRCN.SR bit, and change SDRAM to the
+ # self-refresh mode. Execute the dummy load to all memory
+ # devices set to cacheable on the external bus side in parallel
+ # with this.
+ sti gr3,@(gr4,#SDRAMC_DRCN)
+
+ # (8) Execute memory barrier instruction (MEMBAR).
+ membar
+
+ # (9) Read the DSTS register repeatedly until '1' stands in the
+ # DSTS.SSI field.
+1: ldi @(gr4,#SDRAMC_DSTS),gr3
+ andicc gr3,#SDRAMC_DSTS_SSI,gr3,icc0
+ beq icc0,#0,1b
+
+ # (10) Execute memory barrier instruction (MEMBAR).
+ membar
+
+#if 1
+ # (11) Set the value of CMODE that you want to change to
+ # SWCMODE.SWCM[3:0].
+ sti gr8,@(gr5,#CLKC_SWCMODE)
+
+ # (12) Set '1' to the CLKC.SWEN bit. In that case, do not change
+ # fields other than SWEN of the CLKC register.
+ sti gr6,@(gr5,#0)
+#endif
+ # (13) Execute the instruction just after the memory barrier
+ # instruction that executes the self-loop 256 times. (Meanwhile,
+ # the CMODE switch is done.)
+ membar
+ setlos #256,gr7
+2: subicc gr7,#1,gr7,icc0
+ bne icc0,#2,2b
+
+ LEDS 0x36
+
+ # (14) Release the self-refresh of SDRAM.
+ sti gr0,@(gr4,#SDRAMC_DRCN)
+
+ # Wait for it...
+3: ldi @(gr4,#SDRAMC_DSTS),gr3
+ andicc gr3,#SDRAMC_DSTS_SSI,gr3,icc0
+ bne icc0,#2,3b
+
+#if 0
+ li 0x0100000,gr10
+4: subicc gr10,#1,gr10,icc0
+
+ bne icc0,#0,4b
+#endif
+
+__cmode_icache_lock_end:
+
+ li #__cmode_icache_lock_start,gr3
+ li #__cmode_icache_lock_end,gr4
+
+4: icul gr3
+ addi gr3,#L1_CACHE_BYTES,gr3
+ cmp gr4,gr3,icc0
+ bhi icc0,#0,4b
+
+#if 0 // Fujitsu recommends skipping this step and will update the docs.
+ # (15) Release the interrupt mask setting of the MASK register of
+ # the interrupt controller if necessary.
+ sti gr13,@(gr12,#0)
+#endif
+	# (16) Set '1' in the PSR.ET bit, and permit interrupts.
+ movgs gr14,psr
+
+ bralr
+
+ .size frv_change_cmode, .-frv_change_cmode
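
Steps (4) to (14) above depend on the region between __cmode_icache_lock_start and __cmode_icache_lock_end having been preloaded into the instruction cache line by line, so that no instruction fetch touches SDRAM while it sits in self-refresh. A C rendering of that preload loop, purely as a sketch (the line size and the icpl stand-in are assumptions):

	#include <stdint.h>

	#define SKETCH_L1_CACHE_BYTES 64	/* assumed cache line size */

	/* walk the region one cache line at a time; icpl_line() stands in for
	 * the icpl (instruction cache preload and lock) instruction */
	static void sketch_preload_and_lock(uintptr_t start, uintptr_t end,
					    void (*icpl_line)(uintptr_t addr))
	{
		uintptr_t addr;

		for (addr = start; addr < end; addr += SKETCH_L1_CACHE_BYTES)
			icpl_line(addr);
	}
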
diff --git a/arch/frv/kernel/debug-stub.c b/arch/frv/kernel/debug-stub.c
new file mode 100644
index 00000000000..2845139c807
--- /dev/null
+++ b/arch/frv/kernel/debug-stub.c
@@ -0,0 +1,259 @@
+/* debug-stub.c: debug-mode stub
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/serial_reg.h>
+#include <linux/start_kernel.h>
+
+#include <asm/system.h>
+#include <asm/serial-regs.h>
+#include <asm/timer-regs.h>
+#include <asm/irc-regs.h>
+#include <asm/gdb-stub.h>
+#include "gdb-io.h"
+
+/* CPU board CON5 */
+#define __UART0(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X)))
+
+#define LSR_WAIT_FOR0(STATE) \
+do { \
+} while (!(__UART0(LSR) & UART_LSR_##STATE))
+
+#define FLOWCTL_QUERY0(LINE) ({ __UART0(MSR) & UART_MSR_##LINE; })
+#define FLOWCTL_CLEAR0(LINE) do { __UART0(MCR) &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET0(LINE) do { __UART0(MCR) |= UART_MCR_##LINE; } while (0)
+
+#define FLOWCTL_WAIT_FOR0(LINE) \
+do { \
+ gdbstub_do_rx(); \
+} while (!FLOWCTL_QUERY0(LINE))
+
+struct frv_debug_status __debug_status;
+
+static void __init debug_stub_init(void);
+
+/*****************************************************************************/
+/*
+ * debug mode handler stub
+ * - we come here with the CPU in debug mode and with exceptions disabled
+ * - handle debugging services for userspace
+ */
+asmlinkage void debug_stub(void)
+{
+ unsigned long hsr0;
+ int type = 0;
+
+ static u8 inited = 0;
+ if (!inited) {
+ debug_stub_init();
+ type = -1;
+ inited = 1;
+ }
+
+ hsr0 = __get_HSR(0);
+ if (hsr0 & HSR0_ETMD)
+ __set_HSR(0, hsr0 & ~HSR0_ETMD);
+
+ /* disable single stepping */
+ __debug_status.dcr &= ~DCR_SE;
+
+ /* kernel mode can propose an exception be handled in debug mode by jumping to a special
+ * location */
+ if (__debug_frame->pc == (unsigned long) __break_hijack_kernel_event_breaks_here) {
+ /* replace the debug frame with the kernel frame and discard
+ * the top kernel context */
+ *__debug_frame = *__frame;
+ __frame = __debug_frame->next_frame;
+ __debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
+ __debug_status.brr |= BRR_EB;
+ }
+
+ if (__debug_frame->pc == (unsigned long) __debug_bug_trap + 4) {
+ __debug_frame->pc = __debug_frame->lr;
+ type = __debug_frame->gr8;
+ }
+
+#ifdef CONFIG_GDBSTUB
+ gdbstub(type);
+#endif
+
+ if (hsr0 & HSR0_ETMD)
+ __set_HSR(0, __get_HSR(0) | HSR0_ETMD);
+
+} /* end debug_stub() */
+
+/*****************************************************************************/
+/*
+ * debug stub initialisation
+ */
+static void __init debug_stub_init(void)
+{
+ __set_IRR(6, 0xff000000); /* map ERRs to NMI */
+ __set_IITMR(1, 0x20000000); /* ERR0/1, UART0/1 IRQ detect levels */
+
+ asm volatile(" movgs gr0,ibar0 \n"
+ " movgs gr0,ibar1 \n"
+ " movgs gr0,ibar2 \n"
+ " movgs gr0,ibar3 \n"
+ " movgs gr0,dbar0 \n"
+ " movgs gr0,dbmr00 \n"
+ " movgs gr0,dbmr01 \n"
+ " movgs gr0,dbdr00 \n"
+ " movgs gr0,dbdr01 \n"
+ " movgs gr0,dbar1 \n"
+ " movgs gr0,dbmr10 \n"
+ " movgs gr0,dbmr11 \n"
+ " movgs gr0,dbdr10 \n"
+ " movgs gr0,dbdr11 \n"
+ );
+
+ /* deal with debugging stub initialisation and initial pause */
+ if (__debug_frame->pc == (unsigned long) __debug_stub_init_break)
+ __debug_frame->pc = (unsigned long) start_kernel;
+
+ /* enable the debug events we want to trap */
+ __debug_status.dcr = DCR_EBE;
+
+#ifdef CONFIG_GDBSTUB
+ gdbstub_init();
+#endif
+
+ __clr_MASK_all();
+ __clr_MASK(15);
+ __clr_RC(15);
+
+} /* end debug_stub_init() */
+
+/*****************************************************************************/
+/*
+ * kernel "exit" trap for gdb stub
+ */
+void debug_stub_exit(int status)
+{
+
+#ifdef CONFIG_GDBSTUB
+ gdbstub_exit(status);
+#endif
+
+} /* end debug_stub_exit() */
+
+/*****************************************************************************/
+/*
+ * send string to serial port
+ */
+void debug_to_serial(const char *p, int n)
+{
+ char ch;
+
+ for (; n > 0; n--) {
+ ch = *p++;
+ FLOWCTL_SET0(DTR);
+ LSR_WAIT_FOR0(THRE);
+ // FLOWCTL_WAIT_FOR(CTS);
+
+ if (ch == 0x0a) {
+ __UART0(TX) = 0x0d;
+ mb();
+ LSR_WAIT_FOR0(THRE);
+ // FLOWCTL_WAIT_FOR(CTS);
+ }
+ __UART0(TX) = ch;
+ mb();
+
+ FLOWCTL_CLEAR0(DTR);
+ }
+
+} /* end debug_to_serial() */
+
+/*****************************************************************************/
+/*
+ * send string to serial port
+ */
+void debug_to_serial2(const char *fmt, ...)
+{
+ va_list va;
+ char buf[64];
+ int n;
+
+ va_start(va, fmt);
+ n = vsprintf(buf, fmt, va);
+ va_end(va);
+
+ debug_to_serial(buf, n);
+
+} /* end debug_to_serial2() */
+
+/*****************************************************************************/
+/*
+ * set up the ttyS0 serial port baud rate timers
+ */
+void __init console_set_baud(unsigned baud)
+{
+ unsigned value, high, low;
+ u8 lcr;
+
+ /* work out the divisor to give us the nearest higher baud rate */
+ value = __serial_clock_speed_HZ / 16 / baud;
+
+ /* determine the baud rate range */
+ high = __serial_clock_speed_HZ / 16 / value;
+ low = __serial_clock_speed_HZ / 16 / (value + 1);
+
+ /* pick the nearest bound */
+ if (low + (high - low) / 2 > baud)
+ value++;
+
+ lcr = __UART0(LCR);
+ __UART0(LCR) |= UART_LCR_DLAB;
+ mb();
+ __UART0(DLL) = value & 0xff;
+ __UART0(DLM) = (value >> 8) & 0xff;
+ mb();
+ __UART0(LCR) = lcr;
+ mb();
+
+} /* end console_set_baud() */
+
+/*****************************************************************************/
+/*
+ * read back the ttyS0 serial port baud rate divisor setting
+ */
+int __init console_get_baud(void)
+{
+ unsigned value;
+ u8 lcr;
+
+ lcr = __UART0(LCR);
+ __UART0(LCR) |= UART_LCR_DLAB;
+ mb();
+ value = __UART0(DLM) << 8;
+ value |= __UART0(DLL);
+ __UART0(LCR) = lcr;
+ mb();
+
+ return value;
+} /* end console_get_baud() */
+
+/*****************************************************************************/
+/*
+ * display BUG() info
+ */
+#ifndef CONFIG_NO_KERNEL_MSG
+void __debug_bug_printk(const char *file, unsigned line)
+{
+ printk("kernel BUG at %s:%d!\n", file, line);
+
+} /* end __debug_bug_printk() */
+#endif
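
console_set_baud() above rounds the divisor down (which raises the rate) and then checks whether divisor+1 would land nearer the requested baud. A worked sketch of that selection; the 1MHz UART clock is an invented figure, not the board's actual __serial_clock_speed_HZ:

	#include <stdio.h>

	static unsigned int pick_divisor(unsigned int uart_clock_hz, unsigned int baud)
	{
		unsigned int value = uart_clock_hz / 16 / baud;
		unsigned int high = uart_clock_hz / 16 / value;
		unsigned int low = uart_clock_hz / 16 / (value + 1);

		if (low + (high - low) / 2 > baud)	/* requested rate nearer the lower bound */
			value++;
		return value;
	}

	int main(void)
	{
		/* 1 MHz clock, 38400 baud: divisor 1 gives 62500, divisor 2 gives
		 * 31250; 31250 is the nearer rate, so divisor 2 is chosen */
		printf("divisor = %u\n", pick_divisor(1000000, 38400));
		return 0;
	}
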
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
new file mode 100644
index 00000000000..156184e17e5
--- /dev/null
+++ b/arch/frv/kernel/dma.c
@@ -0,0 +1,463 @@
+/* dma.c: DMA controller management on FR401 and the like
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/dma.h>
+#include <asm/gpio-regs.h>
+#include <asm/irc-regs.h>
+#include <asm/cpu-irqs.h>
+
+struct frv_dma_channel {
+ uint8_t flags;
+#define FRV_DMA_FLAGS_RESERVED 0x01
+#define FRV_DMA_FLAGS_INUSE 0x02
+#define FRV_DMA_FLAGS_PAUSED 0x04
+ uint8_t cap; /* capabilities available */
+ int irq; /* completion IRQ */
+ uint32_t dreqbit;
+ uint32_t dackbit;
+ uint32_t donebit;
+ const unsigned long ioaddr; /* DMA controller regs addr */
+ const char *devname;
+ dma_irq_handler_t handler;
+ void *data;
+};
+
+
+#define __get_DMAC(IO,X) ({ *(volatile unsigned long *)((IO) + DMAC_##X##x); })
+
+#define __set_DMAC(IO,X,V) \
+do { \
+ *(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
+ mb(); \
+} while(0)
+
+#define ___set_DMAC(IO,X,V) \
+do { \
+ *(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
+} while(0)
+
+
+static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
+ [0] = {
+ .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
+ .irq = IRQ_CPU_DMA0,
+ .dreqbit = SIR_DREQ0_INPUT,
+ .dackbit = SOR_DACK0_OUTPUT,
+ .donebit = SOR_DONE0_OUTPUT,
+ .ioaddr = 0xfe000900,
+ },
+ [1] = {
+ .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
+ .irq = IRQ_CPU_DMA1,
+ .dreqbit = SIR_DREQ1_INPUT,
+ .dackbit = SOR_DACK1_OUTPUT,
+ .donebit = SOR_DONE1_OUTPUT,
+ .ioaddr = 0xfe000980,
+ },
+ [2] = {
+ .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
+ .irq = IRQ_CPU_DMA2,
+ .dreqbit = SIR_DREQ2_INPUT,
+ .dackbit = SOR_DACK2_OUTPUT,
+ .ioaddr = 0xfe000a00,
+ },
+ [3] = {
+ .cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
+ .irq = IRQ_CPU_DMA3,
+ .dreqbit = SIR_DREQ3_INPUT,
+ .dackbit = SOR_DACK3_OUTPUT,
+ .ioaddr = 0xfe000a80,
+ },
+ [4] = {
+ .cap = FRV_DMA_CAP_DREQ,
+ .irq = IRQ_CPU_DMA4,
+ .dreqbit = SIR_DREQ4_INPUT,
+ .ioaddr = 0xfe001000,
+ },
+ [5] = {
+ .cap = FRV_DMA_CAP_DREQ,
+ .irq = IRQ_CPU_DMA5,
+ .dreqbit = SIR_DREQ5_INPUT,
+ .ioaddr = 0xfe001080,
+ },
+ [6] = {
+ .cap = FRV_DMA_CAP_DREQ,
+ .irq = IRQ_CPU_DMA6,
+ .dreqbit = SIR_DREQ6_INPUT,
+ .ioaddr = 0xfe001100,
+ },
+ [7] = {
+ .cap = FRV_DMA_CAP_DREQ,
+ .irq = IRQ_CPU_DMA7,
+ .dreqbit = SIR_DREQ7_INPUT,
+ .ioaddr = 0xfe001180,
+ },
+};
+
+static DEFINE_RWLOCK(frv_dma_channels_lock);
+
+unsigned long frv_dma_inprogress;
+
+#define frv_clear_dma_inprogress(channel) \
+ atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+
+#define frv_set_dma_inprogress(channel) \
+ atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+
+/*****************************************************************************/
+/*
+ * DMA irq handler - determine channel involved, grab status and call real handler
+ */
+static irqreturn_t dma_irq_handler(int irq, void *_channel)
+{
+ struct frv_dma_channel *channel = _channel;
+
+ frv_clear_dma_inprogress(channel - frv_dma_channels);
+ return channel->handler(channel - frv_dma_channels,
+ __get_DMAC(channel->ioaddr, CSTR),
+ channel->data);
+
+} /* end dma_irq_handler() */
+
+/*****************************************************************************/
+/*
+ * Determine which DMA controllers are present on this CPU
+ */
+void __init frv_dma_init(void)
+{
+ unsigned long psr = __get_PSR();
+ int num_dma, i;
+
+ /* First, determine how many DMA channels are available */
+ switch (PSR_IMPLE(psr)) {
+ case PSR_IMPLE_FR405:
+ case PSR_IMPLE_FR451:
+ case PSR_IMPLE_FR501:
+ case PSR_IMPLE_FR551:
+ num_dma = FRV_DMA_8CHANS;
+ break;
+
+ case PSR_IMPLE_FR401:
+ default:
+ num_dma = FRV_DMA_4CHANS;
+ break;
+ }
+
+ /* Now mark all of the non-existent channels as reserved */
+ for(i = num_dma; i < FRV_DMA_NCHANS; i++)
+ frv_dma_channels[i].flags = FRV_DMA_FLAGS_RESERVED;
+
+} /* end frv_dma_init() */
+
+/*****************************************************************************/
+/*
+ * allocate a DMA controller channel and the IRQ associated with it
+ */
+int frv_dma_open(const char *devname,
+ unsigned long dmamask,
+ int dmacap,
+ dma_irq_handler_t handler,
+ unsigned long irq_flags,
+ void *data)
+{
+ struct frv_dma_channel *channel;
+ int dma, ret;
+ uint32_t val;
+
+ write_lock(&frv_dma_channels_lock);
+
+ ret = -ENOSPC;
+
+ for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
+ channel = &frv_dma_channels[dma];
+
+ if (!test_bit(dma, &dmamask))
+ continue;
+
+ if ((channel->cap & dmacap) != dmacap)
+ continue;
+
+ if (!frv_dma_channels[dma].flags)
+ goto found;
+ }
+
+ goto out;
+
+ found:
+ ret = request_irq(channel->irq, dma_irq_handler, irq_flags, devname, channel);
+ if (ret < 0)
+ goto out;
+
+ /* okay, we've allocated all the resources */
+ channel = &frv_dma_channels[dma];
+
+ channel->flags |= FRV_DMA_FLAGS_INUSE;
+ channel->devname = devname;
+ channel->handler = handler;
+ channel->data = data;
+
+ /* Now make sure we are set up for DMA and not GPIO */
+ /* SIR bit must be set for DMA to work */
+ __set_SIR(channel->dreqbit | __get_SIR());
+ /* SOR bits depend on what the caller requests */
+ val = __get_SOR();
+ if(dmacap & FRV_DMA_CAP_DACK)
+ val |= channel->dackbit;
+ else
+ val &= ~channel->dackbit;
+ if(dmacap & FRV_DMA_CAP_DONE)
+ val |= channel->donebit;
+ else
+ val &= ~channel->donebit;
+ __set_SOR(val);
+
+ ret = dma;
+ out:
+ write_unlock(&frv_dma_channels_lock);
+ return ret;
+} /* end frv_dma_open() */
+
+EXPORT_SYMBOL(frv_dma_open);
+
+/*****************************************************************************/
+/*
+ * close a DMA channel and its associated interrupt
+ */
+void frv_dma_close(int dma)
+{
+ struct frv_dma_channel *channel = &frv_dma_channels[dma];
+ unsigned long flags;
+
+ write_lock_irqsave(&frv_dma_channels_lock, flags);
+
+ free_irq(channel->irq, channel);
+ frv_dma_stop(dma);
+
+ channel->flags &= ~FRV_DMA_FLAGS_INUSE;
+
+ write_unlock_irqrestore(&frv_dma_channels_lock, flags);
+} /* end frv_dma_close() */
+
+EXPORT_SYMBOL(frv_dma_close);
+
+/*****************************************************************************/
+/*
+ * set static configuration on a DMA channel
+ */
+void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+
+ ___set_DMAC(ioaddr, CCFR, ccfr);
+ ___set_DMAC(ioaddr, CCTR, cctr);
+ ___set_DMAC(ioaddr, APR, apr);
+ mb();
+
+} /* end frv_dma_config() */
+
+EXPORT_SYMBOL(frv_dma_config);
+
+/*****************************************************************************/
+/*
+ * start a DMA channel
+ */
+void frv_dma_start(int dma,
+ unsigned long sba, unsigned long dba,
+ unsigned long pix, unsigned long six, unsigned long bcl)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+
+ ___set_DMAC(ioaddr, SBA, sba);
+ ___set_DMAC(ioaddr, DBA, dba);
+ ___set_DMAC(ioaddr, PIX, pix);
+ ___set_DMAC(ioaddr, SIX, six);
+ ___set_DMAC(ioaddr, BCL, bcl);
+ ___set_DMAC(ioaddr, CSTR, 0);
+ mb();
+
+ __set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
+ frv_set_dma_inprogress(dma);
+
+} /* end frv_dma_start() */
+
+EXPORT_SYMBOL(frv_dma_start);
+
+/*****************************************************************************/
+/*
+ * restart a DMA channel that's been stopped in circular addressing mode by comparison-end
+ */
+void frv_dma_restart_circular(int dma, unsigned long six)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+
+ ___set_DMAC(ioaddr, SIX, six);
+ ___set_DMAC(ioaddr, CSTR, __get_DMAC(ioaddr, CSTR) & ~DMAC_CSTRx_CE);
+ mb();
+
+ __set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
+ frv_set_dma_inprogress(dma);
+
+} /* end frv_dma_restart_circular() */
+
+EXPORT_SYMBOL(frv_dma_restart_circular);
+
+/*****************************************************************************/
+/*
+ * stop a DMA channel
+ */
+void frv_dma_stop(int dma)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+ uint32_t cctr;
+
+ ___set_DMAC(ioaddr, CSTR, 0);
+ cctr = __get_DMAC(ioaddr, CCTR);
+ cctr &= ~(DMAC_CCTRx_IE | DMAC_CCTRx_ACT);
+ cctr |= DMAC_CCTRx_FC; /* fifo clear */
+ __set_DMAC(ioaddr, CCTR, cctr);
+ __set_DMAC(ioaddr, BCL, 0);
+ frv_clear_dma_inprogress(dma);
+} /* end frv_dma_stop() */
+
+EXPORT_SYMBOL(frv_dma_stop);
+
+/*****************************************************************************/
+/*
+ * test interrupt status of DMA channel
+ */
+int is_frv_dma_interrupting(int dma)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+
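+	/* bit 23 of CSTR is presumably the channel's interrupt-asserted flag
+	 * (the DMAC_CSTRx_INT constant used elsewhere in this file) */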
+ return __get_DMAC(ioaddr, CSTR) & (1 << 23);
+
+} /* end is_frv_dma_interrupting() */
+
+EXPORT_SYMBOL(is_frv_dma_interrupting);
+
+/*****************************************************************************/
+/*
+ * dump data about a DMA channel
+ */
+void frv_dma_dump(int dma)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+ unsigned long cstr, pix, six, bcl;
+
+ cstr = __get_DMAC(ioaddr, CSTR);
+ pix = __get_DMAC(ioaddr, PIX);
+ six = __get_DMAC(ioaddr, SIX);
+ bcl = __get_DMAC(ioaddr, BCL);
+
+ printk("DMA[%d] cstr=%lx pix=%lx six=%lx bcl=%lx\n", dma, cstr, pix, six, bcl);
+
+} /* end frv_dma_dump() */
+
+EXPORT_SYMBOL(frv_dma_dump);
+
+/*****************************************************************************/
+/*
+ * pause all DMA controllers
+ * - called by clock mangling routines
+ * - caller must be holding interrupts disabled
+ */
+void frv_dma_pause_all(void)
+{
+ struct frv_dma_channel *channel;
+ unsigned long ioaddr;
+ unsigned long cstr, cctr;
+ int dma;
+
+ write_lock(&frv_dma_channels_lock);
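+	/* note: the lock is left held over the pause; frv_dma_resume_all()
+	 * releases it once the channels have been restarted */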
+
+ for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
+ channel = &frv_dma_channels[dma];
+
+ if (!(channel->flags & FRV_DMA_FLAGS_INUSE))
+ continue;
+
+ ioaddr = channel->ioaddr;
+ cctr = __get_DMAC(ioaddr, CCTR);
+ if (cctr & DMAC_CCTRx_ACT) {
+ cctr &= ~DMAC_CCTRx_ACT;
+ __set_DMAC(ioaddr, CCTR, cctr);
+
+ do {
+ cstr = __get_DMAC(ioaddr, CSTR);
+ } while (cstr & DMAC_CSTRx_BUSY);
+
+ if (cstr & DMAC_CSTRx_FED)
+ channel->flags |= FRV_DMA_FLAGS_PAUSED;
+ frv_clear_dma_inprogress(dma);
+ }
+ }
+
+} /* end frv_dma_pause_all() */
+
+EXPORT_SYMBOL(frv_dma_pause_all);
+
+/*****************************************************************************/
+/*
+ * resume paused DMA controllers
+ * - called by clock mangling routines
+ * - caller must be holding interrupts disabled
+ */
+void frv_dma_resume_all(void)
+{
+ struct frv_dma_channel *channel;
+ unsigned long ioaddr;
+ unsigned long cstr, cctr;
+ int dma;
+
+ for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
+ channel = &frv_dma_channels[dma];
+
+ if (!(channel->flags & FRV_DMA_FLAGS_PAUSED))
+ continue;
+
+ ioaddr = channel->ioaddr;
+ cstr = __get_DMAC(ioaddr, CSTR);
+ cstr &= ~(DMAC_CSTRx_FED | DMAC_CSTRx_INT);
+ __set_DMAC(ioaddr, CSTR, cstr);
+
+ cctr = __get_DMAC(ioaddr, CCTR);
+ cctr |= DMAC_CCTRx_ACT;
+ __set_DMAC(ioaddr, CCTR, cctr);
+
+ channel->flags &= ~FRV_DMA_FLAGS_PAUSED;
+ frv_set_dma_inprogress(dma);
+ }
+
+ write_unlock(&frv_dma_channels_lock);
+
+} /* end frv_dma_resume_all() */
+
+EXPORT_SYMBOL(frv_dma_resume_all);
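+
+/* Sketch of how the pause/resume pair is meant to bracket a clock change
+ * (hypothetical caller; change_cpu_clock() is a placeholder name):
+ *
+ *	local_irq_save(flags);
+ *	frv_dma_pause_all();		// takes frv_dma_channels_lock
+ *	change_cpu_clock(...);		// the "clock mangling" step
+ *	frv_dma_resume_all();		// releases frv_dma_channels_lock
+ *	local_irq_restore(flags);
+ */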
+
+/*****************************************************************************/
+/*
+ * dma status clear
+ */
+void frv_dma_status_clear(int dma)
+{
+ unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
+ uint32_t cctr;
+ ___set_DMAC(ioaddr, CSTR, 0);
+
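+	/* the CCTR value read back below is discarded; the read is presumably
+	 * only there to ensure the CSTR write above has reached the controller
+	 * before the caller continues */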
+ cctr = __get_DMAC(ioaddr, CCTR);
+} /* end frv_dma_status_clear() */
+
+EXPORT_SYMBOL(frv_dma_status_clear);
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
new file mode 100644
index 00000000000..06c5ae191e5
--- /dev/null
+++ b/arch/frv/kernel/entry-table.S
@@ -0,0 +1,329 @@
+/* entry-table.S: main trap vector tables and exception jump table
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/spr-regs.h>
+
+###############################################################################
+#
+# Declare the main trap and vector tables
+#
+# There are six tables:
+#
+# (1) The trap table for debug mode
+# (2) The trap table for kernel mode
+# (3) The trap table for user mode
+#
+# The CPU jumps to an appropriate slot in the appropriate table to perform
+# exception processing. We have three different tables for the three
+# different CPU modes because there is no hardware differentiation between
+# stack pointers for these three modes, and so we have to invent one when
+# crossing mode boundaries.
+#
+# (4) The exception handler vector table
+#
+# The user and kernel trap tables use the same prologue for normal
+# exception processing. The prologue then jumps to the handler in this
+# table, as indexed by the exception ID from the TBR.
+#
+# (5) The fixup table for kernel-trap single-step
+# (6) The fixup table for user-trap single-step
+#
+# Due to the way single-stepping works on this CPU (single-step is not
+# disabled when crossing exception boundaries, only when in debug mode),
+# we have to catch the single-step event in break.S and jump to the fixup
+# routine pointed to by this table.
+#
+# The linker script places the user mode and kernel mode trap tables on to
+# the same 8KB page, so that break.S can be more efficient when performing
+# single-step bypass management
+#
+###############################################################################
+
+ # trap table for entry from debug mode
+ .section .trap.break,"ax"
+ .balign 256*16
+ .globl __entry_breaktrap_table
+__entry_breaktrap_table:
+
+ # trap table for entry from user mode
+ .section .trap.user,"ax"
+ .balign 256*16
+ .globl __entry_usertrap_table
+__entry_usertrap_table:
+
+ # trap table for entry from kernel mode
+ .section .trap.kernel,"ax"
+ .balign 256*16
+ .globl __entry_kerneltrap_table
+__entry_kerneltrap_table:
+
+ # exception handler jump table
+ .section .trap.vector,"ax"
+ .balign 256*4
+ .globl __entry_vector_table
+__entry_vector_table:
+
+ # trap fixup table for single-stepping in user mode
+ .section .trap.fixup.user,"a"
+ .balign 256*4
+ .globl __break_usertrap_fixup_table
+__break_usertrap_fixup_table:
+
+	# trap fixup table for single-stepping in kernel mode
+ .section .trap.fixup.kernel,"a"
+ .balign 256*4
+ .globl __break_kerneltrap_fixup_table
+__break_kerneltrap_fixup_table:
+
+ # handler declaration for a software or program interrupt
+.macro VECTOR_SOFTPROG tbr_tt, vec
+ .section .trap.user
+ .org \tbr_tt
+ bra __entry_uspace_softprog_interrupt
+ .section .trap.fixup.user
+ .org \tbr_tt >> 2
+ .long __break_step_uspace_softprog_interrupt
+ .section .trap.kernel
+ .org \tbr_tt
+ bra __entry_kernel_softprog_interrupt
+ .section .trap.fixup.kernel
+ .org \tbr_tt >> 2
+ .long __break_step_kernel_softprog_interrupt
+ .section .trap.vector
+ .org \tbr_tt >> 2
+ .long \vec
+.endm
+
+ # handler declaration for a maskable external interrupt
+.macro VECTOR_IRQ tbr_tt, vec
+ .section .trap.user
+ .org \tbr_tt
+ bra __entry_uspace_external_interrupt
+ .section .trap.fixup.user
+ .org \tbr_tt >> 2
+ .long __break_step_uspace_external_interrupt
+ .section .trap.kernel
+ .org \tbr_tt
+ # deal with virtual interrupt disablement
+ beq icc2,#0,__entry_kernel_external_interrupt_virtually_disabled
+ bra __entry_kernel_external_interrupt
+ .section .trap.fixup.kernel
+ .org \tbr_tt >> 2
+ .long __break_step_kernel_external_interrupt
+ .section .trap.vector
+ .org \tbr_tt >> 2
+ .long \vec
+.endm
+
+ # handler declaration for an NMI external interrupt
+.macro VECTOR_NMI tbr_tt, vec
+ .section .trap.user
+ .org \tbr_tt
+ break
+ break
+ break
+ break
+ .section .trap.kernel
+ .org \tbr_tt
+ break
+ break
+ break
+ break
+ .section .trap.vector
+ .org \tbr_tt >> 2
+ .long \vec
+.endm
+
+ # handler declaration for an MMU only software or program interrupt
+.macro VECTOR_SP_MMU tbr_tt, vec
+#ifdef CONFIG_MMU
+ VECTOR_SOFTPROG \tbr_tt, \vec
+#else
+ VECTOR_NMI \tbr_tt, 0
+#endif
+.endm
+
+
+###############################################################################
+#
+# specification of the vectors
+# - note: each macro inserts code into multiple sections
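+# - for example, "VECTOR_SOFTPROG TBR_TT_ILLEGAL_INSTR, __entry_illegal_instruction" plants a
+#   branch to the appropriate softprog prologue at offset TBR_TT_ILLEGAL_INSTR in both the user
+#   and kernel trap tables, the matching single-step fixup pointers at TBR_TT_ILLEGAL_INSTR >> 2
+#   in the two fixup tables, and __entry_illegal_instruction at the same offset in the handler
+#   vector table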
+#
+###############################################################################
+ VECTOR_SP_MMU TBR_TT_INSTR_MMU_MISS, __entry_insn_mmu_miss
+ VECTOR_SOFTPROG TBR_TT_INSTR_ACC_ERROR, __entry_insn_access_error
+ VECTOR_SOFTPROG TBR_TT_INSTR_ACC_EXCEP, __entry_insn_access_exception
+ VECTOR_SOFTPROG TBR_TT_PRIV_INSTR, __entry_privileged_instruction
+ VECTOR_SOFTPROG TBR_TT_ILLEGAL_INSTR, __entry_illegal_instruction
+ VECTOR_SOFTPROG TBR_TT_FP_EXCEPTION, __entry_media_exception
+ VECTOR_SOFTPROG TBR_TT_MP_EXCEPTION, __entry_media_exception
+ VECTOR_SOFTPROG TBR_TT_DATA_ACC_ERROR, __entry_data_access_error
+ VECTOR_SP_MMU TBR_TT_DATA_MMU_MISS, __entry_data_mmu_miss
+ VECTOR_SOFTPROG TBR_TT_DATA_ACC_EXCEP, __entry_data_access_exception
+ VECTOR_SOFTPROG TBR_TT_DATA_STR_ERROR, __entry_data_store_error
+ VECTOR_SOFTPROG TBR_TT_DIVISION_EXCEP, __entry_division_exception
+
+#ifdef CONFIG_MMU
+ .section .trap.user
+ .org TBR_TT_INSTR_TLB_MISS
+ .globl __trap_user_insn_tlb_miss
+__trap_user_insn_tlb_miss:
+ movsg ear0,gr28 /* faulting address */
+ movsg scr0,gr31 /* get mapped PTD coverage start address */
+ xor.p gr28,gr31,gr31 /* compare addresses */
+ bra __entry_user_insn_tlb_miss
+
+ .org TBR_TT_DATA_TLB_MISS
+ .globl __trap_user_data_tlb_miss
+__trap_user_data_tlb_miss:
+ movsg ear0,gr28 /* faulting address */
+ movsg scr1,gr31 /* get mapped PTD coverage start address */
+ xor.p gr28,gr31,gr31 /* compare addresses */
+ bra __entry_user_data_tlb_miss
+
+ .section .trap.kernel
+ .org TBR_TT_INSTR_TLB_MISS
+ .globl __trap_kernel_insn_tlb_miss
+__trap_kernel_insn_tlb_miss:
+ movsg ear0,gr29 /* faulting address */
+ movsg scr0,gr31 /* get mapped PTD coverage start address */
+ xor.p gr29,gr31,gr31 /* compare addresses */
+ bra __entry_kernel_insn_tlb_miss
+
+ .org TBR_TT_DATA_TLB_MISS
+ .globl __trap_kernel_data_tlb_miss
+__trap_kernel_data_tlb_miss:
+ movsg ear0,gr29 /* faulting address */
+ movsg scr1,gr31 /* get mapped PTD coverage start address */
+ xor.p gr29,gr31,gr31 /* compare addresses */
+ bra __entry_kernel_data_tlb_miss
+
+ .section .trap.fixup.user
+ .org TBR_TT_INSTR_TLB_MISS >> 2
+ .globl __trap_fixup_user_insn_tlb_miss
+__trap_fixup_user_insn_tlb_miss:
+ .long __break_user_insn_tlb_miss
+ .org TBR_TT_DATA_TLB_MISS >> 2
+ .globl __trap_fixup_user_data_tlb_miss
+__trap_fixup_user_data_tlb_miss:
+ .long __break_user_data_tlb_miss
+
+ .section .trap.fixup.kernel
+ .org TBR_TT_INSTR_TLB_MISS >> 2
+ .globl __trap_fixup_kernel_insn_tlb_miss
+__trap_fixup_kernel_insn_tlb_miss:
+ .long __break_kernel_insn_tlb_miss
+ .org TBR_TT_DATA_TLB_MISS >> 2
+ .globl __trap_fixup_kernel_data_tlb_miss
+__trap_fixup_kernel_data_tlb_miss:
+ .long __break_kernel_data_tlb_miss
+
+ .section .trap.vector
+ .org TBR_TT_INSTR_TLB_MISS >> 2
+ .long __entry_insn_mmu_fault
+ .org TBR_TT_DATA_TLB_MISS >> 2
+ .long __entry_data_mmu_fault
+#endif
+
+ VECTOR_SP_MMU TBR_TT_DATA_DAT_EXCEP, __entry_data_dat_fault
+ VECTOR_NMI TBR_TT_DECREMENT_TIMER, __entry_do_NMI
+ VECTOR_SOFTPROG TBR_TT_COMPOUND_EXCEP, __entry_compound_exception
+ VECTOR_IRQ TBR_TT_INTERRUPT_1, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_2, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_3, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_4, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_5, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_6, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_7, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_8, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_9, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_10, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_11, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_12, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_13, __entry_do_IRQ
+ VECTOR_IRQ TBR_TT_INTERRUPT_14, __entry_do_IRQ
+ VECTOR_NMI TBR_TT_INTERRUPT_15, __entry_do_NMI
+
+ # miscellaneous user mode entry points
+ .section .trap.user
+ .org TBR_TT_TRAP0
+ .rept 127
+ bra __entry_uspace_softprog_interrupt
+ .long 0,0,0
+ .endr
+ .org TBR_TT_BREAK
+ bra __entry_break
+ .long 0,0,0
+
+ .section .trap.fixup.user
+ .org TBR_TT_TRAP0 >> 2
+ .rept 127
+ .long __break_step_uspace_softprog_interrupt
+ .endr
+ .org TBR_TT_BREAK >> 2
+ .long 0
+
+ # miscellaneous kernel mode entry points
+ .section .trap.kernel
+ .org TBR_TT_TRAP0
+ bra __entry_kernel_softprog_interrupt
+ .org TBR_TT_TRAP1
+ bra __entry_kernel_softprog_interrupt
+
+ # trap #2 in kernel - reenable interrupts
+ .org TBR_TT_TRAP2
+ bra __entry_kernel_external_interrupt_virtual_reenable
+
+ # miscellaneous kernel traps
+ .org TBR_TT_TRAP3
+ .rept 124
+ bra __entry_kernel_softprog_interrupt
+ .long 0,0,0
+ .endr
+ .org TBR_TT_BREAK
+ bra __entry_break
+ .long 0,0,0
+
+ .section .trap.fixup.kernel
+ .org TBR_TT_TRAP0 >> 2
+ .long __break_step_kernel_softprog_interrupt
+ .long __break_step_kernel_softprog_interrupt
+ .long __break_step_kernel_external_interrupt_virtual_reenable
+ .rept 124
+ .long __break_step_kernel_softprog_interrupt
+ .endr
+ .org TBR_TT_BREAK >> 2
+ .long 0
+
+ # miscellaneous debug mode entry points
+ .section .trap.break
+ .org TBR_TT_BREAK
+ movsg bpcsr,gr30
+ jmpl @(gr30,gr0)
+
+ # miscellaneous vectors
+ .section .trap.vector
+ .org TBR_TT_TRAP0 >> 2
+ .long system_call
+ .rept 119
+ .long __entry_unsupported_trap
+ .endr
+
+ # userspace atomic op emulation, traps 120-126
+ .rept 7
+ .long __entry_atomic_op
+ .endr
+
+ .org TBR_TT_BREAK >> 2
+ .long __entry_debug_exception
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
new file mode 100644
index 00000000000..5ba23f715ea
--- /dev/null
+++ b/arch/frv/kernel/entry.S
@@ -0,0 +1,1531 @@
+/* entry.S: FR-V entry
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Entry to the kernel is "interesting":
+ * (1) There are no stack pointers, not even for the kernel
+ * (2) General Registers should not be clobbered
+ * (3) There are no kernel-only data registers
+ * (4) Since all addressing modes are wrt a General Register, no global
+ * variables can be reached
+ *
+ * We deal with this by declaring that we shall kill GR28 on entering the
+ * kernel from userspace
+ *
+ * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
+ * they can't rely on GR28 to be anything useful, and so need to clobber a
+ * separate register (GR31). Break interrupts are managed in break.S
+ *
+ * GR29 _is_ saved, and holds the current task pointer globally
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/ptrace.h>
+#include <asm/errno.h>
+#include <asm/cache.h>
+#include <asm/spr-regs.h>
+
+#define nr_syscalls ((syscall_table_size)/4)
+
+ .section .text..entry
+ .balign 4
+
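+# debug-only tracing macros: when the bodies below are uncommented they write a value to
+# what is presumably the board's LED port at 0xe1200004 so that entry/exit points can be
+# followed on hardware; as shipped they expand to nothing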
+.macro LEDS val
+# sethi.p %hi(0xe1200004),gr30
+# setlo %lo(0xe1200004),gr30
+# setlos #~\val,gr31
+# st gr31,@(gr30,gr0)
+# sethi.p %hi(0xffc00100),gr30
+# setlo %lo(0xffc00100),gr30
+# sth gr0,@(gr30,gr0)
+# membar
+.endm
+
+.macro LEDS32
+# not gr31,gr31
+# sethi.p %hi(0xe1200004),gr30
+# setlo %lo(0xe1200004),gr30
+# st.p gr31,@(gr30,gr0)
+# srli gr31,#16,gr31
+# sethi.p %hi(0xffc00100),gr30
+# setlo %lo(0xffc00100),gr30
+# sth gr31,@(gr30,gr0)
+# membar
+.endm
+
+###############################################################################
+#
+# entry point for External interrupts received whilst executing userspace code
+#
+###############################################################################
+ .globl __entry_uspace_external_interrupt
+ .type __entry_uspace_external_interrupt,@function
+__entry_uspace_external_interrupt:
+ LEDS 0x6200
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ ldi @(gr28,#0),gr28
+
+ # handle h/w single-step through exceptions
+ sti gr0,@(gr28,#REG__STATUS)
+
+ .globl __entry_uspace_external_interrupt_reentry
+__entry_uspace_external_interrupt_reentry:
+ LEDS 0x6201
+
+ setlos #REG__END,gr30
+ dcpl gr28,gr30,#0
+
+ # finish building the exception frame
+ sti sp, @(gr28,#REG_SP)
+ stdi gr2, @(gr28,#REG_GR(2))
+ stdi gr4, @(gr28,#REG_GR(4))
+ stdi gr6, @(gr28,#REG_GR(6))
+ stdi gr8, @(gr28,#REG_GR(8))
+ stdi gr10,@(gr28,#REG_GR(10))
+ stdi gr12,@(gr28,#REG_GR(12))
+ stdi gr14,@(gr28,#REG_GR(14))
+ stdi gr16,@(gr28,#REG_GR(16))
+ stdi gr18,@(gr28,#REG_GR(18))
+ stdi gr20,@(gr28,#REG_GR(20))
+ stdi gr22,@(gr28,#REG_GR(22))
+ stdi gr24,@(gr28,#REG_GR(24))
+ stdi gr26,@(gr28,#REG_GR(26))
+ sti gr0, @(gr28,#REG_GR(28))
+ sti gr29,@(gr28,#REG_GR(29))
+ stdi.p gr30,@(gr28,#REG_GR(30))
+
+ # set up the kernel stack pointer
+ ori gr28,0,sp
+
+ movsg tbr ,gr20
+ movsg psr ,gr22
+ movsg pcsr,gr21
+ movsg isr ,gr23
+ movsg ccr ,gr24
+ movsg cccr,gr25
+ movsg lr ,gr26
+ movsg lcr ,gr27
+
+ setlos.p #-1,gr4
+ andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
+ andi.p gr22,#~(PSR_PS|PSR_S),gr6
+ slli gr5,#1,gr5
+ or gr6,gr5,gr5
+ andi gr5,#~PSR_ET,gr5
+
+ sti gr20,@(gr28,#REG_TBR)
+ sti gr21,@(gr28,#REG_PC)
+ sti gr5 ,@(gr28,#REG_PSR)
+ sti gr23,@(gr28,#REG_ISR)
+ stdi gr24,@(gr28,#REG_CCR)
+ stdi gr26,@(gr28,#REG_LR)
+ sti gr4 ,@(gr28,#REG_SYSCALLNO)
+
+ movsg iacc0h,gr4
+ movsg iacc0l,gr5
+ stdi gr4,@(gr28,#REG_IACC0)
+
+ movsg gner0,gr4
+ movsg gner1,gr5
+ stdi.p gr4,@(gr28,#REG_GNER0)
+
+ # interrupts start off fully disabled in the interrupt handler
+ subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
+
+ # set up kernel global registers
+ sethi.p %hi(__kernel_current_task),gr5
+ setlo %lo(__kernel_current_task),gr5
+ sethi.p %hi(_gp),gr16
+ setlo %lo(_gp),gr16
+ ldi @(gr5,#0),gr29
+ ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
+
+ # make sure we (the kernel) get div-zero and misalignment exceptions
+ setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
+ movgs gr5,isr
+
+ # switch to the kernel trap table
+ sethi.p %hi(__entry_kerneltrap_table),gr6
+ setlo %lo(__entry_kerneltrap_table),gr6
+ movgs gr6,tbr
+
+ # set the return address
+ sethi.p %hi(__entry_return_from_user_interrupt),gr4
+ setlo %lo(__entry_return_from_user_interrupt),gr4
+ movgs gr4,lr
+
+ # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
+ movsg psr,gr4
+
+ ori gr4,#PSR_PIL_14,gr4
+ movgs gr4,psr
+ ori gr4,#PSR_PIL_14|PSR_ET,gr4
+ movgs gr4,psr
+
+ LEDS 0x6202
+ bra do_IRQ
+
+ .size __entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
+
+###############################################################################
+#
+# entry point for External interrupts received whilst executing kernel code
+# - on arriving here, the following registers should already be set up:
+# GR15 - current thread_info struct pointer
+# GR16 - kernel GP-REL pointer
+# GR29 - current task struct pointer
+# TBR - kernel trap vector table
+# ISR - kernel's preferred integer controls
+#
+###############################################################################
+ .globl __entry_kernel_external_interrupt
+ .type __entry_kernel_external_interrupt,@function
+__entry_kernel_external_interrupt:
+ LEDS 0x6210
+// sub sp,gr15,gr31
+// LEDS32
+
+ # set up the stack pointer
+ or.p sp,gr0,gr30
+ subi sp,#REG__END,sp
+ sti gr30,@(sp,#REG_SP)
+
+ # handle h/w single-step through exceptions
+ sti gr0,@(sp,#REG__STATUS)
+
+ .globl __entry_kernel_external_interrupt_reentry
+__entry_kernel_external_interrupt_reentry:
+ LEDS 0x6211
+
+ # set up the exception frame
+ setlos #REG__END,gr30
+ dcpl sp,gr30,#0
+
+ sti.p gr28,@(sp,#REG_GR(28))
+ ori sp,0,gr28
+
+ # finish building the exception frame
+ stdi gr2,@(gr28,#REG_GR(2))
+ stdi gr4,@(gr28,#REG_GR(4))
+ stdi gr6,@(gr28,#REG_GR(6))
+ stdi gr8,@(gr28,#REG_GR(8))
+ stdi gr10,@(gr28,#REG_GR(10))
+ stdi gr12,@(gr28,#REG_GR(12))
+ stdi gr14,@(gr28,#REG_GR(14))
+ stdi gr16,@(gr28,#REG_GR(16))
+ stdi gr18,@(gr28,#REG_GR(18))
+ stdi gr20,@(gr28,#REG_GR(20))
+ stdi gr22,@(gr28,#REG_GR(22))
+ stdi gr24,@(gr28,#REG_GR(24))
+ stdi gr26,@(gr28,#REG_GR(26))
+ sti gr29,@(gr28,#REG_GR(29))
+ stdi.p gr30,@(gr28,#REG_GR(30))
+
+ # note virtual interrupts will be fully enabled upon return
+ subicc gr0,#1,gr0,icc2 /* clear Z, set C */
+
+ movsg tbr ,gr20
+ movsg psr ,gr22
+ movsg pcsr,gr21
+ movsg isr ,gr23
+ movsg ccr ,gr24
+ movsg cccr,gr25
+ movsg lr ,gr26
+ movsg lcr ,gr27
+
+ setlos.p #-1,gr4
+ andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
+ andi.p gr22,#~(PSR_PS|PSR_S),gr6
+ slli gr5,#1,gr5
+ or gr6,gr5,gr5
+ andi.p gr5,#~PSR_ET,gr5
+
+ # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
+ # - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
+ andi gr25,#~0xc0,gr25
+
+ sti gr20,@(gr28,#REG_TBR)
+ sti gr21,@(gr28,#REG_PC)
+ sti gr5 ,@(gr28,#REG_PSR)
+ sti gr23,@(gr28,#REG_ISR)
+ stdi gr24,@(gr28,#REG_CCR)
+ stdi gr26,@(gr28,#REG_LR)
+ sti gr4 ,@(gr28,#REG_SYSCALLNO)
+
+ movsg iacc0h,gr4
+ movsg iacc0l,gr5
+ stdi gr4,@(gr28,#REG_IACC0)
+
+ movsg gner0,gr4
+ movsg gner1,gr5
+ stdi.p gr4,@(gr28,#REG_GNER0)
+
+ # interrupts start off fully disabled in the interrupt handler
+ subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
+
+ # set the return address
+ sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
+ setlo %lo(__entry_return_from_kernel_interrupt),gr4
+ movgs gr4,lr
+
+ # clear power-saving mode flags
+ movsg hsr0,gr4
+ andi gr4,#~HSR0_PDM,gr4
+ movgs gr4,hsr0
+
+ # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_PIL_14,gr4
+ movgs gr4,psr
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+
+ LEDS 0x6212
+ bra do_IRQ
+
+ .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
+
+###############################################################################
+#
+# deal with interrupts that were actually virtually disabled
+# - we need to really disable them, flag the fact and return immediately
+# - if you change this, you must alter break.S also
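+# - the CPU is sent here by the "beq icc2,#0,..." test in entry-table.S's VECTOR_IRQ macro,
+#   i.e. only when ICC2.Z indicates the interrupt arrived while virtually disabled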
+#
+###############################################################################
+ .balign L1_CACHE_BYTES
+ .globl __entry_kernel_external_interrupt_virtually_disabled
+ .type __entry_kernel_external_interrupt_virtually_disabled,@function
+__entry_kernel_external_interrupt_virtually_disabled:
+ movsg psr,gr30
+ andi gr30,#~PSR_PIL,gr30
+ ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only
+ movgs gr30,psr
+ subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C
+ rett #0
+
+ .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
+
+###############################################################################
+#
+# deal with re-enablement of interrupts that were pending when virtually re-enabled
+# - set ICC2.C, re-enable the real interrupts and return
+# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
+# - if you change this, you must alter break.S also
+#
+###############################################################################
+ .balign L1_CACHE_BYTES
+ .globl __entry_kernel_external_interrupt_virtual_reenable
+ .type __entry_kernel_external_interrupt_virtual_reenable,@function
+__entry_kernel_external_interrupt_virtual_reenable:
+ movsg psr,gr30
+ andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts
+ movgs gr30,psr
+ subicc gr0,#1,gr0,icc2 ; clear Z, set C
+ rett #0
+
+ .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
+
+###############################################################################
+#
+# entry point for Software and Program interrupts generated whilst executing userspace code
+#
+###############################################################################
+ .globl __entry_uspace_softprog_interrupt
+ .type __entry_uspace_softprog_interrupt,@function
+ .globl __entry_uspace_handle_mmu_fault
+__entry_uspace_softprog_interrupt:
+ LEDS 0x6000
+#ifdef CONFIG_MMU
+ movsg ear0,gr28
+__entry_uspace_handle_mmu_fault:
+ movgs gr28,scr2
+#endif
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ ldi @(gr28,#0),gr28
+
+ # handle h/w single-step through exceptions
+ sti gr0,@(gr28,#REG__STATUS)
+
+ .globl __entry_uspace_softprog_interrupt_reentry
+__entry_uspace_softprog_interrupt_reentry:
+ LEDS 0x6001
+
+ setlos #REG__END,gr30
+ dcpl gr28,gr30,#0
+
+ # set up the kernel stack pointer
+ sti.p sp,@(gr28,#REG_SP)
+ ori gr28,0,sp
+ sti gr0,@(gr28,#REG_GR(28))
+
+ stdi gr20,@(gr28,#REG_GR(20))
+ stdi gr22,@(gr28,#REG_GR(22))
+
+ movsg tbr,gr20
+ movsg pcsr,gr21
+ movsg psr,gr22
+
+ sethi.p %hi(__entry_return_from_user_exception),gr23
+ setlo %lo(__entry_return_from_user_exception),gr23
+
+ bra __entry_common
+
+ .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
+
+ # single-stepping was disabled on entry to a TLB handler that then faulted
+#ifdef CONFIG_MMU
+ .globl __entry_uspace_handle_mmu_fault_sstep
+__entry_uspace_handle_mmu_fault_sstep:
+ movgs gr28,scr2
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ ldi @(gr28,#0),gr28
+
+ # flag single-step re-enablement
+ sti gr0,@(gr28,#REG__STATUS)
+ bra __entry_uspace_softprog_interrupt_reentry
+#endif
+
+
+###############################################################################
+#
+# entry point for Software and Program interrupts generated whilst executing kernel code
+#
+###############################################################################
+ .globl __entry_kernel_softprog_interrupt
+ .type __entry_kernel_softprog_interrupt,@function
+__entry_kernel_softprog_interrupt:
+ LEDS 0x6004
+
+#ifdef CONFIG_MMU
+ movsg ear0,gr30
+ movgs gr30,scr2
+#endif
+
+ .globl __entry_kernel_handle_mmu_fault
+__entry_kernel_handle_mmu_fault:
+ # set up the stack pointer
+ subi sp,#REG__END,sp
+ sti sp,@(sp,#REG_SP)
+ sti sp,@(sp,#REG_SP-4)
+ andi sp,#~7,sp
+
+ # handle h/w single-step through exceptions
+ sti gr0,@(sp,#REG__STATUS)
+
+ .globl __entry_kernel_softprog_interrupt_reentry
+__entry_kernel_softprog_interrupt_reentry:
+ LEDS 0x6005
+
+ setlos #REG__END,gr30
+ dcpl sp,gr30,#0
+
+ # set up the exception frame
+ sti.p gr28,@(sp,#REG_GR(28))
+ ori sp,0,gr28
+
+ stdi gr20,@(gr28,#REG_GR(20))
+ stdi gr22,@(gr28,#REG_GR(22))
+
+ ldi @(sp,#REG_SP),gr22 /* reconstruct the old SP */
+ addi gr22,#REG__END,gr22
+ sti gr22,@(sp,#REG_SP)
+
+ # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
+ # - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
+ movsg cccr,gr20
+ andi gr20,#~0xc0,gr20
+ movgs gr20,cccr
+
+ movsg tbr,gr20
+ movsg pcsr,gr21
+ movsg psr,gr22
+
+ sethi.p %hi(__entry_return_from_kernel_exception),gr23
+ setlo %lo(__entry_return_from_kernel_exception),gr23
+ bra __entry_common
+
+ .size __entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
+
+ # single-stepping was disabled on entry to a TLB handler that then faulted
+#ifdef CONFIG_MMU
+ .globl __entry_kernel_handle_mmu_fault_sstep
+__entry_kernel_handle_mmu_fault_sstep:
+ # set up the stack pointer
+ subi sp,#REG__END,sp
+ sti sp,@(sp,#REG_SP)
+ sti sp,@(sp,#REG_SP-4)
+ andi sp,#~7,sp
+
+ # flag single-step re-enablement
+ sethi #REG__STATUS_STEP,gr30
+ sti gr30,@(sp,#REG__STATUS)
+ bra __entry_kernel_softprog_interrupt_reentry
+#endif
+
+
+###############################################################################
+#
+# the rest of the kernel entry point code
+# - on arriving here, the following registers should be set up:
+# GR1 - kernel stack pointer
+# GR7 - syscall number (trap 0 only)
+# GR8-13 - syscall args (trap 0 only)
+# GR20 - saved TBR
+# GR21 - saved PC
+# GR22 - saved PSR
+# GR23 - return handler address
+# GR28 - exception frame on stack
+# SCR2 - saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
+# PSR - PSR.S 1, PSR.ET 0
+#
+###############################################################################
+ .globl __entry_common
+ .type __entry_common,@function
+__entry_common:
+ LEDS 0x6008
+
+ # finish building the exception frame
+ stdi gr2,@(gr28,#REG_GR(2))
+ stdi gr4,@(gr28,#REG_GR(4))
+ stdi gr6,@(gr28,#REG_GR(6))
+ stdi gr8,@(gr28,#REG_GR(8))
+ stdi gr10,@(gr28,#REG_GR(10))
+ stdi gr12,@(gr28,#REG_GR(12))
+ stdi gr14,@(gr28,#REG_GR(14))
+ stdi gr16,@(gr28,#REG_GR(16))
+ stdi gr18,@(gr28,#REG_GR(18))
+ stdi gr24,@(gr28,#REG_GR(24))
+ stdi gr26,@(gr28,#REG_GR(26))
+ sti gr29,@(gr28,#REG_GR(29))
+ stdi gr30,@(gr28,#REG_GR(30))
+
+ movsg lcr ,gr27
+ movsg lr ,gr26
+ movgs gr23,lr
+ movsg cccr,gr25
+ movsg ccr ,gr24
+ movsg isr ,gr23
+
+ setlos.p #-1,gr4
+ andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
+ andi.p gr22,#~(PSR_PS|PSR_S),gr6
+ slli gr5,#1,gr5
+ or gr6,gr5,gr5
+ andi gr5,#~PSR_ET,gr5
+
+ sti gr20,@(gr28,#REG_TBR)
+ sti gr21,@(gr28,#REG_PC)
+ sti gr5 ,@(gr28,#REG_PSR)
+ sti gr23,@(gr28,#REG_ISR)
+ stdi gr24,@(gr28,#REG_CCR)
+ stdi gr26,@(gr28,#REG_LR)
+ sti gr4 ,@(gr28,#REG_SYSCALLNO)
+
+ movsg iacc0h,gr4
+ movsg iacc0l,gr5
+ stdi gr4,@(gr28,#REG_IACC0)
+
+ movsg gner0,gr4
+ movsg gner1,gr5
+ stdi.p gr4,@(gr28,#REG_GNER0)
+
+ # set up virtual interrupt disablement
+ subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */
+
+ # set up kernel global registers
+ sethi.p %hi(__kernel_current_task),gr5
+ setlo %lo(__kernel_current_task),gr5
+ sethi.p %hi(_gp),gr16
+ setlo %lo(_gp),gr16
+ ldi @(gr5,#0),gr29
+ ldi @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
+
+ # switch to the kernel trap table
+ sethi.p %hi(__entry_kerneltrap_table),gr6
+ setlo %lo(__entry_kerneltrap_table),gr6
+ movgs gr6,tbr
+
+ # make sure we (the kernel) get div-zero and misalignment exceptions
+ setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
+ movgs gr5,isr
+
+ # clear power-saving mode flags
+ movsg hsr0,gr4
+ andi gr4,#~HSR0_PDM,gr4
+ movgs gr4,hsr0
+
+ # multiplex again using old TBR as a guide
+ setlos.p #TBR_TT,gr3
+ sethi %hi(__entry_vector_table),gr6
+ and.p gr20,gr3,gr5
+ setlo %lo(__entry_vector_table),gr6
+ srli gr5,#2,gr5
+ ld @(gr5,gr6),gr5
+
+ LEDS 0x6009
+ jmpl @(gr5,gr0)
+
+
+ .size __entry_common,.-__entry_common
+
+###############################################################################
+#
+# handle instruction MMU fault
+#
+###############################################################################
+#ifdef CONFIG_MMU
+ .globl __entry_insn_mmu_fault
+__entry_insn_mmu_fault:
+ LEDS 0x6010
+ setlos #0,gr8
+ movsg esr0,gr9
+ movsg scr2,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+
+ sethi.p %hi(do_page_fault),gr5
+ setlo %lo(do_page_fault),gr5
+ jmpl @(gr5,gr0) ; call do_page_fault(0,esr0,ear0)
+#endif
+
+
+###############################################################################
+#
+# handle instruction access error
+#
+###############################################################################
+ .globl __entry_insn_access_error
+__entry_insn_access_error:
+ LEDS 0x6011
+ sethi.p %hi(insn_access_error),gr5
+ setlo %lo(insn_access_error),gr5
+ movsg esfr1,gr8
+ movsg epcr0,gr9
+ movsg esr0,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call insn_access_error(esfr1,epcr0,esr0)
+
+###############################################################################
+#
+# handle various instructions of dubious legality
+#
+###############################################################################
+ .globl __entry_unsupported_trap
+ .globl __entry_illegal_instruction
+ .globl __entry_privileged_instruction
+ .globl __entry_debug_exception
+__entry_unsupported_trap:
+ subi gr21,#4,gr21
+ sti gr21,@(gr28,#REG_PC)
+__entry_illegal_instruction:
+__entry_privileged_instruction:
+__entry_debug_exception:
+ LEDS 0x6012
+ sethi.p %hi(illegal_instruction),gr5
+ setlo %lo(illegal_instruction),gr5
+ movsg esfr1,gr8
+ movsg epcr0,gr9
+ movsg esr0,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call ill_insn(esfr1,epcr0,esr0)
+
+###############################################################################
+#
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+ .globl __entry_atomic_op
+__entry_atomic_op:
+ LEDS 0x6012
+ sethi.p %hi(atomic_operation),gr5
+ setlo %lo(atomic_operation),gr5
+ movsg esfr1,gr8
+ movsg epcr0,gr9
+ movsg esr0,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call atomic_operation(esfr1,epcr0,esr0)
+
+###############################################################################
+#
+# handle media exception
+#
+###############################################################################
+ .globl __entry_media_exception
+__entry_media_exception:
+ LEDS 0x6013
+ sethi.p %hi(media_exception),gr5
+ setlo %lo(media_exception),gr5
+ movsg msr0,gr8
+ movsg msr1,gr9
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call media_excep(msr0,msr1)
+
+###############################################################################
+#
+# handle data MMU fault
+# handle data DAT fault (write-protect exception)
+#
+###############################################################################
+#ifdef CONFIG_MMU
+ .globl __entry_data_mmu_fault
+__entry_data_mmu_fault:
+ .globl __entry_data_dat_fault
+__entry_data_dat_fault:
+ LEDS 0x6014
+ setlos #1,gr8
+ movsg esr0,gr9
+ movsg scr2,gr10 ; saved EAR0
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+
+ sethi.p %hi(do_page_fault),gr5
+ setlo %lo(do_page_fault),gr5
+ jmpl @(gr5,gr0) ; call do_page_fault(1,esr0,ear0)
+#endif
+
+###############################################################################
+#
+# handle data and instruction access exceptions
+#
+###############################################################################
+ .globl __entry_insn_access_exception
+ .globl __entry_data_access_exception
+__entry_insn_access_exception:
+__entry_data_access_exception:
+ LEDS 0x6016
+ sethi.p %hi(memory_access_exception),gr5
+ setlo %lo(memory_access_exception),gr5
+ movsg esr0,gr8
+ movsg scr2,gr9 ; saved EAR0
+ movsg epcr0,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call memory_access_error(esr0,ear0,epcr0)
+
+###############################################################################
+#
+# handle data access error
+#
+###############################################################################
+ .globl __entry_data_access_error
+__entry_data_access_error:
+ LEDS 0x6016
+ sethi.p %hi(data_access_error),gr5
+ setlo %lo(data_access_error),gr5
+ movsg esfr1,gr8
+ movsg esr15,gr9
+ movsg ear15,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call data_access_error(esfr1,esr15,ear15)
+
+###############################################################################
+#
+# handle data store error
+#
+###############################################################################
+ .globl __entry_data_store_error
+__entry_data_store_error:
+ LEDS 0x6017
+ sethi.p %hi(data_store_error),gr5
+ setlo %lo(data_store_error),gr5
+ movsg esfr1,gr8
+ movsg esr14,gr9
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call data_store_error(esfr1,esr14)
+
+###############################################################################
+#
+# handle division exception
+#
+###############################################################################
+ .globl __entry_division_exception
+__entry_division_exception:
+ LEDS 0x6018
+ sethi.p %hi(division_exception),gr5
+ setlo %lo(division_exception),gr5
+ movsg esfr1,gr8
+ movsg esr0,gr9
+ movsg isr,gr10
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call div_excep(esfr1,esr0,isr)
+
+###############################################################################
+#
+# handle compound exception
+#
+###############################################################################
+ .globl __entry_compound_exception
+__entry_compound_exception:
+ LEDS 0x6019
+ sethi.p %hi(compound_exception),gr5
+ setlo %lo(compound_exception),gr5
+ movsg esfr1,gr8
+ movsg esr0,gr9
+ movsg esr14,gr10
+ movsg esr15,gr11
+ movsg msr0,gr12
+ movsg msr1,gr13
+
+ # now that we've accessed the exception regs, we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ jmpl @(gr5,gr0) ; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
+
+###############################################################################
+#
+# handle interrupts and NMIs
+#
+###############################################################################
+ .globl __entry_do_IRQ
+__entry_do_IRQ:
+ LEDS 0x6020
+
+ # we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ bra do_IRQ
+
+ .globl __entry_do_NMI
+__entry_do_NMI:
+ LEDS 0x6021
+
+ # we can enable exceptions
+ movsg psr,gr4
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+ bra do_NMI
+
+###############################################################################
+#
+# the return path for a newly forked child process
+# - __switch_to() saved the old current pointer in GR8 for us
+#
+###############################################################################
+ .globl ret_from_fork
+ret_from_fork:
+ LEDS 0x6100
+ call schedule_tail
+
+ # fork & co. return 0 to child
+ setlos.p #0,gr8
+ bra __syscall_exit
+
+###################################################################################################
+#
+# Return to user mode is not as complex as all this looks,
+# but we want the default path for a system call return to
+# go as quickly as possible, which is why some of this is
+# less clear than it otherwise should be.
+#
+###################################################################################################
+ .balign L1_CACHE_BYTES
+ .globl system_call
+system_call:
+ LEDS 0x6101
+ movsg psr,gr4 ; enable exceptions
+ ori gr4,#PSR_ET,gr4
+ movgs gr4,psr
+
+ sti gr7,@(gr28,#REG_SYSCALLNO)
+ sti.p gr8,@(gr28,#REG_ORIG_GR8)
+
+ subicc gr7,#nr_syscalls,gr0,icc0
+ bnc icc0,#0,__syscall_badsys
+
+ ldi @(gr15,#TI_FLAGS),gr4
+ andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+ bne icc0,#0,__syscall_trace_entry
+
+__syscall_call:
+ slli.p gr7,#2,gr7
+ sethi %hi(sys_call_table),gr5
+ setlo %lo(sys_call_table),gr5
+ ld @(gr5,gr7),gr4
+ calll @(gr4,gr0)
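+	# when the handler returns, execution falls straight through into __syscall_exit below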
+
+
+###############################################################################
+#
+# return to interrupted process
+#
+###############################################################################
+__syscall_exit:
+ LEDS 0x6300
+
+ sti gr8,@(gr28,#REG_GR(8)) ; save return value
+
+ # rebuild saved psr - execve will change it for init/main.c
+ ldi @(gr28,#REG_PSR),gr22
+ srli gr22,#1,gr5
+ andi.p gr22,#~PSR_PS,gr22
+ andi gr5,#PSR_PS,gr5
+ or gr5,gr22,gr22
+ ori gr22,#PSR_S,gr22
+
+ # keep current PSR in GR23
+ movsg psr,gr23
+
+ # make sure we don't miss an interrupt setting need_resched or sigpending between
+ # sampling and the RETT
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+
+ ldi @(gr15,#TI_FLAGS),gr4
+ sethi.p %hi(_TIF_ALLWORK_MASK),gr5
+ setlo %lo(_TIF_ALLWORK_MASK),gr5
+ andcc gr4,gr5,gr0,icc0
+ bne icc0,#0,__syscall_exit_work
+
+ # restore all registers and return
+__entry_return_direct:
+ LEDS 0x6301
+
+ andi gr22,#~PSR_ET,gr22
+ movgs gr22,psr
+
+ ldi @(gr28,#REG_ISR),gr23
+ lddi @(gr28,#REG_CCR),gr24
+ lddi @(gr28,#REG_LR) ,gr26
+ ldi @(gr28,#REG_PC) ,gr21
+ ldi @(gr28,#REG_TBR),gr20
+
+ movgs gr20,tbr
+ movgs gr21,pcsr
+ movgs gr23,isr
+ movgs gr24,ccr
+ movgs gr25,cccr
+ movgs gr26,lr
+ movgs gr27,lcr
+
+ lddi @(gr28,#REG_GNER0),gr4
+ movgs gr4,gner0
+ movgs gr5,gner1
+
+ lddi @(gr28,#REG_IACC0),gr4
+ movgs gr4,iacc0h
+ movgs gr5,iacc0l
+
+ lddi @(gr28,#REG_GR(4)) ,gr4
+ lddi @(gr28,#REG_GR(6)) ,gr6
+ lddi @(gr28,#REG_GR(8)) ,gr8
+ lddi @(gr28,#REG_GR(10)),gr10
+ lddi @(gr28,#REG_GR(12)),gr12
+ lddi @(gr28,#REG_GR(14)),gr14
+ lddi @(gr28,#REG_GR(16)),gr16
+ lddi @(gr28,#REG_GR(18)),gr18
+ lddi @(gr28,#REG_GR(20)),gr20
+ lddi @(gr28,#REG_GR(22)),gr22
+ lddi @(gr28,#REG_GR(24)),gr24
+ lddi @(gr28,#REG_GR(26)),gr26
+ ldi @(gr28,#REG_GR(29)),gr29
+ lddi @(gr28,#REG_GR(30)),gr30
+
+ # check to see if a debugging return is required
+ LEDS 0x67f0
+ movsg ccr,gr2
+ ldi @(gr28,#REG__STATUS),gr3
+ andicc gr3,#REG__STATUS_STEP,gr0,icc0
+ bne icc0,#0,__entry_return_singlestep
+ movgs gr2,ccr
+
+ ldi @(gr28,#REG_SP) ,sp
+ lddi @(gr28,#REG_GR(2)) ,gr2
+ ldi @(gr28,#REG_GR(28)),gr28
+
+ LEDS 0x67fe
+// movsg pcsr,gr31
+// LEDS32
+
+#if 0
+ # store the current frame in the workram on the FR451
+ movgs gr28,scr2
+ sethi.p %hi(0xfe800000),gr28
+ setlo %lo(0xfe800000),gr28
+
+ stdi gr2,@(gr28,#REG_GR(2))
+ stdi gr4,@(gr28,#REG_GR(4))
+ stdi gr6,@(gr28,#REG_GR(6))
+ stdi gr8,@(gr28,#REG_GR(8))
+ stdi gr10,@(gr28,#REG_GR(10))
+ stdi gr12,@(gr28,#REG_GR(12))
+ stdi gr14,@(gr28,#REG_GR(14))
+ stdi gr16,@(gr28,#REG_GR(16))
+ stdi gr18,@(gr28,#REG_GR(18))
+ stdi gr24,@(gr28,#REG_GR(24))
+ stdi gr26,@(gr28,#REG_GR(26))
+ sti gr29,@(gr28,#REG_GR(29))
+ stdi gr30,@(gr28,#REG_GR(30))
+
+ movsg tbr ,gr30
+ sti gr30,@(gr28,#REG_TBR)
+ movsg pcsr,gr30
+ sti gr30,@(gr28,#REG_PC)
+ movsg psr ,gr30
+ sti gr30,@(gr28,#REG_PSR)
+ movsg isr ,gr30
+ sti gr30,@(gr28,#REG_ISR)
+ movsg ccr ,gr30
+ movsg cccr,gr31
+ stdi gr30,@(gr28,#REG_CCR)
+ movsg lr ,gr30
+ movsg lcr ,gr31
+ stdi gr30,@(gr28,#REG_LR)
+ sti gr0 ,@(gr28,#REG_SYSCALLNO)
+ movsg scr2,gr28
+#endif
+
+ rett #0
+
+ # return via break.S
+__entry_return_singlestep:
+ movgs gr2,ccr
+ lddi @(gr28,#REG_GR(2)) ,gr2
+ ldi @(gr28,#REG_SP) ,sp
+ ldi @(gr28,#REG_GR(28)),gr28
+ LEDS 0x67ff
+ break
+ .globl __entry_return_singlestep_breaks_here
+__entry_return_singlestep_breaks_here:
+ nop
+
+
+###############################################################################
+#
+# return to a process interrupted in kernel space
+# - we need to consider preemption if that is enabled
+#
+###############################################################################
+ .balign L1_CACHE_BYTES
+__entry_return_from_kernel_exception:
+ LEDS 0x6302
+ movsg psr,gr23
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+ bra __entry_return_direct
+
+ .balign L1_CACHE_BYTES
+__entry_return_from_kernel_interrupt:
+ LEDS 0x6303
+ movsg psr,gr23
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+
+#ifdef CONFIG_PREEMPT
+ ldi @(gr15,#TI_PRE_COUNT),gr5
+ subicc gr5,#0,gr0,icc0
+ beq icc0,#0,__entry_return_direct
+
+__entry_preempt_need_resched:
+ ldi @(gr15,#TI_FLAGS),gr4
+ andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
+ beq icc0,#1,__entry_return_direct
+
+ setlos #PREEMPT_ACTIVE,gr5
+ sti gr5,@(gr15,#TI_FLAGS)
+
+ andi gr23,#~PSR_PIL,gr23
+ movgs gr23,psr
+
+ call schedule
+ sti gr0,@(gr15,#TI_PRE_COUNT)
+
+ movsg psr,gr23
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+ bra __entry_preempt_need_resched
+#else
+ bra __entry_return_direct
+#endif
+
+
+###############################################################################
+#
+# perform work that needs to be done immediately before resumption
+#
+###############################################################################
+ .globl __entry_return_from_user_exception
+ .balign L1_CACHE_BYTES
+__entry_return_from_user_exception:
+ LEDS 0x6501
+
+__entry_resume_userspace:
+ # make sure we don't miss an interrupt setting need_resched or sigpending between
+ # sampling and the RETT
+ movsg psr,gr23
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+
+__entry_return_from_user_interrupt:
+ LEDS 0x6402
+ ldi @(gr15,#TI_FLAGS),gr4
+ sethi.p %hi(_TIF_WORK_MASK),gr5
+ setlo %lo(_TIF_WORK_MASK),gr5
+ andcc gr4,gr5,gr0,icc0
+ beq icc0,#1,__entry_return_direct
+
+__entry_work_pending:
+ LEDS 0x6404
+ andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
+ beq icc0,#1,__entry_work_notifysig
+
+__entry_work_resched:
+ LEDS 0x6408
+ movsg psr,gr23
+ andi gr23,#~PSR_PIL,gr23
+ movgs gr23,psr
+ call schedule
+ movsg psr,gr23
+ ori gr23,#PSR_PIL_14,gr23
+ movgs gr23,psr
+
+ LEDS 0x6401
+ ldi @(gr15,#TI_FLAGS),gr4
+ sethi.p %hi(_TIF_WORK_MASK),gr5
+ setlo %lo(_TIF_WORK_MASK),gr5
+ andcc gr4,gr5,gr0,icc0
+ beq icc0,#1,__entry_return_direct
+ andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
+ bne icc0,#1,__entry_work_resched
+
+__entry_work_notifysig:
+ LEDS 0x6410
+ ori.p gr4,#0,gr8
+ call do_notify_resume
+ bra __entry_resume_userspace
+
+ # perform syscall entry tracing
+__syscall_trace_entry:
+ LEDS 0x6320
+ call syscall_trace_entry
+
+ lddi.p @(gr28,#REG_GR(8)) ,gr8
+ ori gr8,#0,gr7 ; syscall_trace_entry() returned new syscallno
+ lddi @(gr28,#REG_GR(10)),gr10
+ lddi.p @(gr28,#REG_GR(12)),gr12
+
+ subicc gr7,#nr_syscalls,gr0,icc0
+ bnc icc0,#0,__syscall_badsys
+ bra __syscall_call
+
+ # perform syscall exit tracing
+__syscall_exit_work:
+ LEDS 0x6340
+ andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
+ beq icc0,#1,__entry_work_pending
+
+ movsg psr,gr23
+ andi gr23,#~PSR_PIL,gr23 ; could let syscall_trace_exit() call schedule()
+ movgs gr23,psr
+
+ call syscall_trace_exit
+ bra __entry_resume_userspace
+
+__syscall_badsys:
+ LEDS 0x6380
+ setlos #-ENOSYS,gr8
+ sti gr8,@(gr28,#REG_GR(8)) ; save return value
+ bra __entry_resume_userspace
+
+
+###############################################################################
+#
+# syscall vector table
+#
+###############################################################################
+ .section .rodata
+ALIGN
+ .globl sys_call_table
+sys_call_table:
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_ni_syscall // sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_ni_syscall // sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+	.long sys_umount		/* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* old old uname syscall */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_ni_syscall // sys_sgetmask
+ .long sys_ni_syscall // sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_ni_syscall // sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_ni_syscall // sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* old_select slot */
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_ni_syscall // old_readdir
+ .long sys_ni_syscall /* 90 */ /* old_mmap slot */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm for i386 */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+	.long sys_ni_syscall		/* obsolete olduname() syscall */
+ .long sys_ni_syscall /* iopl for i386 */ /* 110 */
+ .long sys_vhangup
+	.long sys_ni_syscall		/* obsolete idle() syscall */
+ .long sys_ni_syscall /* vm86old for i386 */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall /* old "cacheflush" */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* for vm86 */
+ .long sys_ni_syscall /* Old sys_query_module */
+ .long sys_poll
+ .long sys_ni_syscall /* Old nfsservctl */
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_ni_syscall //sys_set_thread_area
+ .long sys_ni_syscall //sys_get_thread_area
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_ni_syscall /* sys_vserver */
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_ni_syscall /* reserved for kexec */
+ .long sys_waitid
+ .long sys_ni_syscall /* 285 */ /* available */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 290 */
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat /* 295 */
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64 /* 300 */
+ .long sys_unlinkat
+ .long sys_renameat
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat /* 305 */
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare /* 310 */
+ .long sys_set_robust_list
+ .long sys_get_robust_list
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee /* 315 */
+ .long sys_vmsplice
+ .long sys_move_pages
+ .long sys_getcpu
+ .long sys_epoll_pwait
+ .long sys_utimensat /* 320 */
+ .long sys_signalfd
+ .long sys_timerfd_create
+ .long sys_eventfd
+ .long sys_fallocate
+ .long sys_timerfd_settime /* 325 */
+ .long sys_timerfd_gettime
+ .long sys_signalfd4
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3 /* 330 */
+ .long sys_pipe2
+ .long sys_inotify_init1
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_event_open
+ .long sys_setns
+
+syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
new file mode 100644
index 00000000000..a89803b58b9
--- /dev/null
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -0,0 +1,114 @@
+#include <linux/module.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+
+#include <asm/setup.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/checksum.h>
+#include <asm/hardirq.h>
+#include <asm/cacheflush.h>
+
+extern long __memcpy_user(void *dst, const void *src, size_t count);
+extern long __memset_user(void *dst, const void *src, size_t count);
+
+/* platform dependent support */
+
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+EXPORT_SYMBOL(ip_fast_csum);
+
+#if 0
+EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(local_bh_count);
+#endif
+EXPORT_SYMBOL(kernel_thread);
+
+EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
+EXPORT_SYMBOL(__page_offset);
+EXPORT_SYMBOL(__memcpy_user);
+EXPORT_SYMBOL(__memset_user);
+EXPORT_SYMBOL(frv_dcache_writeback);
+EXPORT_SYMBOL(frv_cache_invalidate);
+EXPORT_SYMBOL(frv_icache_invalidate);
+EXPORT_SYMBOL(frv_cache_wback_inv);
+
+#ifndef CONFIG_MMU
+EXPORT_SYMBOL(memory_start);
+EXPORT_SYMBOL(memory_end);
+#endif
+
+EXPORT_SYMBOL(__debug_bug_trap);
+
+/* The following are special because they're not called
+ explicitly (the C compiler generates them). Fortunately,
+ their interface isn't going to change any time soon, so
+ it's OK to leave them out of version control. */
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+
+EXPORT_SYMBOL(__outsl_ns);
+EXPORT_SYMBOL(__insl_ns);
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
+EXPORT_SYMBOL(atomic_test_and_OR_mask);
+EXPORT_SYMBOL(atomic_test_and_XOR_mask);
+EXPORT_SYMBOL(atomic_add_return);
+EXPORT_SYMBOL(atomic_sub_return);
+EXPORT_SYMBOL(__xchg_32);
+EXPORT_SYMBOL(__cmpxchg_32);
+#endif
+EXPORT_SYMBOL(atomic64_add_return);
+EXPORT_SYMBOL(atomic64_sub_return);
+EXPORT_SYMBOL(__xchg_64);
+EXPORT_SYMBOL(__cmpxchg_64);
+
+EXPORT_SYMBOL(__debug_bug_printk);
+EXPORT_SYMBOL(__delay_loops_MHz);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __gcc_bcmp(void);
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __cmpdi2(void);
+extern void __divdi3(void);
+extern void __lshrdi3(void);
+extern void __moddi3(void);
+extern void __muldi3(void);
+extern void __mulll(void);
+extern void __umulll(void);
+extern void __negdi2(void);
+extern void __ucmpdi2(void);
+extern void __udivdi3(void);
+extern void __udivmoddi4(void);
+extern void __umoddi3(void);
+
+ /* gcc lib functions */
+//EXPORT_SYMBOL(__gcc_bcmp);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+//EXPORT_SYMBOL(__cmpdi2);
+//EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__lshrdi3);
+//EXPORT_SYMBOL(__moddi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulll);
+EXPORT_SYMBOL(__umulll);
+EXPORT_SYMBOL(__negdi2);
+EXPORT_SYMBOL(__ucmpdi2);
+//EXPORT_SYMBOL(__udivdi3);
+//EXPORT_SYMBOL(__udivmoddi4);
+//EXPORT_SYMBOL(__umoddi3);
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
new file mode 100644
index 00000000000..d155ca9e509
--- /dev/null
+++ b/arch/frv/kernel/futex.c
@@ -0,0 +1,242 @@
+/* futex.c: futex operations
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/futex.h>
+#include <asm/errno.h>
+
+/*
+ * the various futex operations; MMU fault checking is ignored under no-MMU
+ * conditions
+ */
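For readers who do not read FR-V assembly: each helper below follows the same pattern, namely atomically load the old value of the user word, compute the new value (set, add, or, and, xor), conditionally store it back and retry until the store sticks, and return -EFAULT if the user access faults. A rough plain-C sketch of the logical effect of the "set" variant (illustrative only; it is not atomic and omits the LD.P/ORCR/CST.P retry loop and the __ex_table fixups that the inline assembly provides):

/* Illustrative sketch only: the logical effect of atomic_futex_op_xchg_set().
 * Not atomic, and not how the assembly below actually does it.
 */
static int sketch_futex_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
{
	u32 oldval;

	if (get_user(oldval, uaddr))		/* fetch the current value */
		return -EFAULT;
	if (put_user((u32) oparg, uaddr))	/* store the replacement */
		return -EFAULT;

	*_oldval = oldval;			/* report what was there before */
	return 0;
}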
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " add %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " or %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " and %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " xor %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * do the futex operations
+ */
+int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_ADD:
+ ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_OR:
+ ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_ANDN:
+ ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_XOR:
+ ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS; break;
+ }
+ }
+
+ return ret;
+
+} /* end futex_atomic_op_inuser() */
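The shifts at the top of futex_atomic_op_inuser() unpack the FUTEX_OP() encoding from &lt;linux/futex.h&gt;: the operation sits in bits 28-31 (bit 31 is FUTEX_OP_OPARG_SHIFT, peeled off separately to turn oparg into a shift count), the comparison in bits 24-27, and two sign-extended 12-bit arguments below them. A worked example with values chosen purely for illustration:

/* Example only: decoding FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0),
 * i.e. "atomically add 1, then report whether the old value was > 0".
 */
int encoded_op = (FUTEX_OP_ADD << 28) | (FUTEX_OP_CMP_GT << 24)
	       | ((1 & 0xfff) << 12) | (0 & 0xfff);

int op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_ADD */
int cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_GT */
int oparg  = (encoded_op << 8) >> 20;	/* 1, sign-extended 12-bit field */
int cmparg = (encoded_op << 20) >> 20;	/* 0, sign-extended 12-bit field */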
diff --git a/arch/frv/kernel/gdb-io.c b/arch/frv/kernel/gdb-io.c
new file mode 100644
index 00000000000..2ca641d199f
--- /dev/null
+++ b/arch/frv/kernel/gdb-io.c
@@ -0,0 +1,216 @@
+/* gdb-io.c: FR403 GDB stub I/O
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/serial_reg.h>
+
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/irc-regs.h>
+#include <asm/timer-regs.h>
+#include <asm/gdb-stub.h>
+#include "gdb-io.h"
+
+#ifdef CONFIG_GDBSTUB_UART0
+#define __UART(X) (*(volatile uint8_t *)(UART0_BASE + (UART_##X)))
+#define __UART_IRR_NMI 0xff0f0000
+#else /* CONFIG_GDBSTUB_UART1 */
+#define __UART(X) (*(volatile uint8_t *)(UART1_BASE + (UART_##X)))
+#define __UART_IRR_NMI 0xfff00000
+#endif
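These are the standard 8250 register names, but re-offset in gdb-io.h on an eight-byte stride (UART_LSR is defined there as 5*8), so the accessor collapses to one volatile byte access. Roughly, assuming the CONFIG_GDBSTUB_UART0 case and the UART0_BASE definition from asm/serial-regs.h:

/* Rough expansion of __UART(LSR) with CONFIG_GDBSTUB_UART0:
 *
 *	(*(volatile uint8_t *)(UART0_BASE + UART_LSR))
 *
 * i.e. a volatile byte load at offset 5*8 = 0x28 from the UART base.
 * Polling the transmitter-empty bit is therefore simply:
 *
 *	while (!(__UART(LSR) & UART_LSR_THRE))
 *		;
 */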
+
+#define LSR_WAIT_FOR(STATE) \
+do { \
+ gdbstub_do_rx(); \
+} while (!(__UART(LSR) & UART_LSR_##STATE))
+
+#define FLOWCTL_QUERY(LINE) ({ __UART(MSR) & UART_MSR_##LINE; })
+#define FLOWCTL_CLEAR(LINE) do { __UART(MCR) &= ~UART_MCR_##LINE; mb(); } while (0)
+#define FLOWCTL_SET(LINE) do { __UART(MCR) |= UART_MCR_##LINE; mb(); } while (0)
+
+#define FLOWCTL_WAIT_FOR(LINE) \
+do { \
+ gdbstub_do_rx(); \
+} while(!FLOWCTL_QUERY(LINE))
+
+/*****************************************************************************/
+/*
+ * initialise the GDB stub
+ * - called with PSR.ET==0, so can't incur external interrupts
+ */
+void gdbstub_io_init(void)
+{
+ /* set up the serial port */
+ __UART(LCR) = UART_LCR_WLEN8; /* 8N1 */
+ __UART(FCR) =
+ UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT |
+ UART_FCR_TRIGGER_1;
+
+ FLOWCTL_CLEAR(DTR);
+ FLOWCTL_SET(RTS);
+
+// gdbstub_set_baud(115200);
+
+ /* we want to get serial receive interrupts */
+ __UART(IER) = UART_IER_RDI | UART_IER_RLSI;
+ mb();
+
+ __set_IRR(6, __UART_IRR_NMI); /* map ERRs and UARTx to NMI */
+
+} /* end gdbstub_io_init() */
+
+/*****************************************************************************/
+/*
+ * set up the GDB stub serial port baud rate timers
+ */
+void gdbstub_set_baud(unsigned baud)
+{
+ unsigned value, high, low;
+ u8 lcr;
+
+ /* work out the divisor to give us the nearest higher baud rate */
+ value = __serial_clock_speed_HZ / 16 / baud;
+
+ /* determine the baud rate range */
+ high = __serial_clock_speed_HZ / 16 / value;
+ low = __serial_clock_speed_HZ / 16 / (value + 1);
+
+ /* pick the nearest bound */
+ if (low + (high - low) / 2 > baud)
+ value++;
+
+ lcr = __UART(LCR);
+ __UART(LCR) |= UART_LCR_DLAB;
+ mb();
+ __UART(DLL) = value & 0xff;
+ __UART(DLM) = (value >> 8) & 0xff;
+ mb();
+ __UART(LCR) = lcr;
+ mb();
+
+} /* end gdbstub_set_baud() */
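The rounding step compares the requested rate against the midpoint of the two achievable rates that bracket it and picks the closer one. A worked example; the clock figure here is only an assumption, since __serial_clock_speed_HZ is board-specific:

/* Worked example, assuming __serial_clock_speed_HZ == 33000000 and a
 * requested baud of 115200:
 *
 *	value = 33000000 / 16 / 115200 = 17
 *	high  = 33000000 / 16 / 17     = 121323 baud
 *	low   = 33000000 / 16 / 18     = 114583 baud
 *	low + (high - low) / 2 = 117953 > 115200, so value becomes 18
 *
 * The divisor 18 is written to DLL/DLM, giving roughly 114583 baud, which is
 * the closer of the two achievable rates.
 */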
+
+/*****************************************************************************/
+/*
+ * receive characters into the receive FIFO
+ */
+void gdbstub_do_rx(void)
+{
+ unsigned ix, nix;
+
+ ix = gdbstub_rx_inp;
+
+ while (__UART(LSR) & UART_LSR_DR) {
+ nix = (ix + 2) & 0xfff;
+ if (nix == gdbstub_rx_outp)
+ break;
+
+ gdbstub_rx_buffer[ix++] = __UART(LSR);
+ gdbstub_rx_buffer[ix++] = __UART(RX);
+ ix = nix;
+ }
+
+ gdbstub_rx_inp = ix;
+
+ __clr_RC(15);
+ __clr_IRL();
+
+} /* end gdbstub_do_rx() */
+
+/*****************************************************************************/
+/*
+ * wait for a character to come from the debugger
+ */
+int gdbstub_rx_char(unsigned char *_ch, int nonblock)
+{
+ unsigned ix;
+ u8 ch, st;
+
+ *_ch = 0xff;
+
+ if (gdbstub_rx_unget) {
+ *_ch = gdbstub_rx_unget;
+ gdbstub_rx_unget = 0;
+ return 0;
+ }
+
+ try_again:
+ gdbstub_do_rx();
+
+ /* pull chars out of the buffer */
+ ix = gdbstub_rx_outp;
+ if (ix == gdbstub_rx_inp) {
+ if (nonblock)
+ return -EAGAIN;
+ //watchdog_alert_counter = 0;
+ goto try_again;
+ }
+
+ st = gdbstub_rx_buffer[ix++];
+ ch = gdbstub_rx_buffer[ix++];
+ gdbstub_rx_outp = ix & 0x00000fff;
+
+ if (st & UART_LSR_BI) {
+ gdbstub_proto("### GDB Rx Break Detected ###\n");
+ return -EINTR;
+ }
+ else if (st & (UART_LSR_FE|UART_LSR_OE|UART_LSR_PE)) {
+ gdbstub_io("### GDB Rx Error (st=%02x) ###\n",st);
+ return -EIO;
+ }
+ else {
+ gdbstub_io("### GDB Rx %02x (st=%02x) ###\n",ch,st);
+ *_ch = ch & 0x7f;
+ return 0;
+ }
+
+} /* end gdbstub_rx_char() */
+
+/*****************************************************************************/
+/*
+ * send a character to the debugger
+ */
+void gdbstub_tx_char(unsigned char ch)
+{
+ FLOWCTL_SET(DTR);
+ LSR_WAIT_FOR(THRE);
+// FLOWCTL_WAIT_FOR(CTS);
+
+ if (ch == 0x0a) {
+ __UART(TX) = 0x0d;
+ mb();
+ LSR_WAIT_FOR(THRE);
+// FLOWCTL_WAIT_FOR(CTS);
+ }
+ __UART(TX) = ch;
+ mb();
+
+ FLOWCTL_CLEAR(DTR);
+} /* end gdbstub_tx_char() */
+
+/*****************************************************************************/
+/*
+ * flush any outstanding characters to the debugger
+ */
+void gdbstub_tx_flush(void)
+{
+ LSR_WAIT_FOR(TEMT);
+ LSR_WAIT_FOR(THRE);
+ FLOWCTL_CLEAR(DTR);
+} /* end gdbstub_tx_flush() */
diff --git a/arch/frv/kernel/gdb-io.h b/arch/frv/kernel/gdb-io.h
new file mode 100644
index 00000000000..138714bacc4
--- /dev/null
+++ b/arch/frv/kernel/gdb-io.h
@@ -0,0 +1,55 @@
+/* gdb-io.h: FR403 GDB I/O port defs
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _GDB_IO_H
+#define _GDB_IO_H
+
+#include <asm/serial-regs.h>
+
+#undef UART_RX
+#undef UART_TX
+#undef UART_DLL
+#undef UART_DLM
+#undef UART_IER
+#undef UART_IIR
+#undef UART_FCR
+#undef UART_LCR
+#undef UART_MCR
+#undef UART_LSR
+#undef UART_MSR
+#undef UART_SCR
+
+#define UART_RX 0*8 /* In: Receive buffer (DLAB=0) */
+#define UART_TX 0*8 /* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0*8 /* Out: Divisor Latch Low (DLAB=1) */
+#define UART_DLM 1*8 /* Out: Divisor Latch High (DLAB=1) */
+#define UART_IER 1*8 /* Out: Interrupt Enable Register */
+#define UART_IIR 2*8 /* In: Interrupt ID Register */
+#define UART_FCR 2*8 /* Out: FIFO Control Register */
+#define UART_LCR 3*8 /* Out: Line Control Register */
+#define UART_MCR 4*8 /* Out: Modem Control Register */
+#define UART_LSR 5*8 /* In: Line Status Register */
+#define UART_MSR 6*8 /* In: Modem Status Register */
+#define UART_SCR 7*8 /* I/O: Scratch Register */
+
+#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
+#define UART_LCR_SBC 0x40 /* Set break control */
+#define UART_LCR_SPAR 0x20 /* Stick parity (?) */
+#define UART_LCR_EPAR 0x10 /* Even parity select */
+#define UART_LCR_PARITY 0x08 /* Parity Enable */
+#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 stop bit, 1= 2 stop bits */
+#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
+#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
+#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
+#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
+
+
+#endif /* _GDB_IO_H */
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
new file mode 100644
index 00000000000..a6d5381c94f
--- /dev/null
+++ b/arch/frv/kernel/gdb-stub.c
@@ -0,0 +1,2150 @@
+/* gdb-stub.c: FRV GDB stub
+ *
+ * Copyright (C) 2003,4 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from Linux/MIPS version, Copyright (C) 1995 Andreas Busse
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * To enable debugger support, two things need to happen. One, a
+ * call to set_debug_traps() is necessary in order to allow any breakpoints
+ * or error conditions to be properly intercepted and reported to gdb.
+ * Two, a breakpoint needs to be generated to begin communication. This
+ * is most easily accomplished by a call to breakpoint(), which
+ * simulates a breakpoint by executing a BREAK instruction.
+ *
+ *
+ * The following gdb commands are supported:
+ *
+ * command function Return value
+ *
+ * g return the value of the CPU registers hex data or ENN
+ * G set the value of the CPU registers OK or ENN
+ *
+ * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
+ * MAA..AA,LLLL: Write LLLL bytes at address AA..AA OK or ENN
+ *
+ * c Resume at current address SNN ( signal NN)
+ * cAA..AA Continue at address AA..AA SNN
+ *
+ * s Step one instruction SNN
+ * sAA..AA Step one instruction from AA..AA SNN
+ *
+ * k kill
+ *
+ * ? What was the last sigval ? SNN (signal NN)
+ *
+ * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
+ * baud rate
+ *
+ * All commands and responses are sent with a packet which includes a
+ * checksum. A packet consists of
+ *
+ * $<packet info>#<checksum>.
+ *
+ * where
+ * <packet info> :: <characters representing the command or response>
+ * <checksum> :: <two hex digits computed as the modulo-256 sum of <packet info>>
+ *
+ * When a packet is received, it is first acknowledged with either '+' or '-'.
+ * '+' indicates a successful transfer. '-' indicates a failed transfer.
+ *
+ * Example:
+ *
+ * Host: Reply:
+ * $m0,10#2a +$00010203040506070809101112131415#42
+ *
+ *
+ * ==============
+ * MORE EXAMPLES:
+ * ==============
+ *
+ * For reference -- the following are the steps that one
+ * company took (RidgeRun Inc) to get remote gdb debugging
+ * going. In this scenario the host machine was a PC and the
+ * target platform was a Galileo EVB64120A MIPS evaluation
+ * board.
+ *
+ * Step 1:
+ * First download gdb-5.0.tar.gz from the internet
+ * and then build/install the package.
+ *
+ * Example:
+ * $ tar zxf gdb-5.0.tar.gz
+ * $ cd gdb-5.0
+ * $ ./configure --target=frv-elf-gdb
+ * $ make
+ * $ frv-elf-gdb
+ *
+ * Step 2:
+ * Configure linux for remote debugging and build it.
+ *
+ * Example:
+ * $ cd ~/linux
+ * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
+ * $ make vmlinux
+ *
+ * Step 3:
+ * Download the kernel to the remote target and start
+ * the kernel running. It will promptly halt and wait
+ * for the host gdb session to connect. It does this
+ * since the "Kernel Hacking" option has defined
+ * CONFIG_REMOTE_DEBUG which in turn enables your calls
+ * to:
+ * set_debug_traps();
+ * breakpoint();
+ *
+ * Step 4:
+ * Start the gdb session on the host.
+ *
+ * Example:
+ * $ frv-elf-gdb vmlinux
+ * (gdb) set remotebaud 115200
+ * (gdb) target remote /dev/ttyS1
+ * ...at this point you are connected to
+ * the remote target and can use gdb
+ * in the normal fashion: setting
+ * breakpoints, single stepping,
+ * printing variables, etc.
+ *
+ */
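The checksum in this framing is just the payload bytes summed modulo 256 and printed as two hex digits. A small host-side sketch, not part of the stub itself (the stub's transmit path is gdbstub_send_packet() further down), that reproduces the framing:

/* Host-side sketch only: frames a payload as described above. */
#include <stdio.h>

static void frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char csum = 0;
	const char *p;

	for (p = payload; *p; p++)	/* modulo-256 sum of the payload bytes */
		csum += (unsigned char) *p;

	snprintf(out, outlen, "$%s#%02x", payload, csum);
}

/* frame_packet("m0,10", buf, sizeof(buf)) yields "$m0,10#2a", matching the
 * example exchange in the comment above. */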
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/nmi.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/gdb-stub.h>
+
+#define LEDS(x) do { /* *(u32*)0xe1200004 = ~(x); mb(); */ } while(0)
+
+#undef GDBSTUB_DEBUG_PROTOCOL
+
+extern void debug_to_serial(const char *p, int n);
+extern void gdbstub_console_write(struct console *co, const char *p, unsigned n);
+
+extern volatile uint32_t __break_error_detect[3]; /* ESFR1, ESR15, EAR15 */
+
+struct __debug_amr {
+ unsigned long L, P;
+} __attribute__((aligned(8)));
+
+struct __debug_mmu {
+ struct {
+ unsigned long hsr0, pcsr, esr0, ear0, epcr0;
+#ifdef CONFIG_MMU
+ unsigned long tplr, tppr, tpxr, cxnr;
+#endif
+ } regs;
+
+ struct __debug_amr iamr[16];
+ struct __debug_amr damr[16];
+
+#ifdef CONFIG_MMU
+ struct __debug_amr tlb[64*2];
+#endif
+};
+
+static struct __debug_mmu __debug_mmu;
+
+/*
+ * BUFMAX defines the maximum number of characters in the inbound/outbound
+ * buffers; at least NUMREGBYTES*2 are needed for register packets
+ */
+#define BUFMAX 2048
+
+#define BREAK_INSN 0x801000c0 /* use "break" as bkpt */
+
+static const char gdbstub_banner[] = "Linux/FR-V GDB Stub (c) RedHat 2003\n";
+
+volatile u8 gdbstub_rx_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+volatile u32 gdbstub_rx_inp = 0;
+volatile u32 gdbstub_rx_outp = 0;
+volatile u8 gdbstub_rx_overflow = 0;
+u8 gdbstub_rx_unget = 0;
+
+/* set with GDB whilst running to permit step through exceptions */
+extern volatile u32 __attribute__((section(".bss"))) gdbstub_trace_through_exceptions;
+
+static char input_buffer[BUFMAX];
+static char output_buffer[BUFMAX];
+
+static const char *regnames[] = {
+ "PSR ", "ISR ", "CCR ", "CCCR",
+ "LR ", "LCR ", "PC ", "_stt",
+ "sys ", "GR8*", "GNE0", "GNE1",
+ "IACH", "IACL",
+ "TBR ", "SP ", "FP ", "GR3 ",
+ "GR4 ", "GR5 ", "GR6 ", "GR7 ",
+ "GR8 ", "GR9 ", "GR10", "GR11",
+ "GR12", "GR13", "GR14", "GR15",
+ "GR16", "GR17", "GR18", "GR19",
+ "GR20", "GR21", "GR22", "GR23",
+ "GR24", "GR25", "GR26", "GR27",
+ "EFRM", "CURR", "GR30", "BFRM"
+};
+
+struct gdbstub_bkpt {
+ unsigned long addr; /* address of breakpoint */
+ unsigned len; /* size of breakpoint */
+ uint32_t originsns[7]; /* original instructions */
+};
+
+static struct gdbstub_bkpt gdbstub_bkpts[256];
+
+/*
+ * local prototypes
+ */
+
+static void gdbstub_recv_packet(char *buffer);
+static int gdbstub_send_packet(char *buffer);
+static int gdbstub_compute_signal(unsigned long tbr);
+static int hex(unsigned char ch);
+static int hexToInt(char **ptr, unsigned long *intValue);
+static unsigned char *mem2hex(const void *mem, char *buf, int count, int may_fault);
+static char *hex2mem(const char *buf, void *_mem, int count);
+
+/*
+ * Convert ch from a hex digit to an int
+ */
+static int hex(unsigned char ch)
+{
+ if (ch >= 'a' && ch <= 'f')
+ return ch-'a'+10;
+ if (ch >= '0' && ch <= '9')
+ return ch-'0';
+ if (ch >= 'A' && ch <= 'F')
+ return ch-'A'+10;
+ return -1;
+}
+
+void gdbstub_printk(const char *fmt, ...)
+{
+ static char buf[1024];
+ va_list args;
+ int len;
+
+ /* Emit the output into the temporary buffer */
+ va_start(args, fmt);
+ len = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ debug_to_serial(buf, len);
+}
+
+static inline char *gdbstub_strcpy(char *dst, const char *src)
+{
+ int loop = 0;
+ while ((dst[loop] = src[loop]))
+ loop++;
+ return dst;
+}
+
+static void gdbstub_purge_cache(void)
+{
+ asm volatile(" dcef @(gr0,gr0),#1 \n"
+ " icei @(gr0,gr0),#1 \n"
+ " membar \n"
+ " bar \n"
+ );
+}
+
+/*****************************************************************************/
+/*
+ * scan for the sequence $<data>#<checksum>
+ */
+static void gdbstub_recv_packet(char *buffer)
+{
+ unsigned char checksum;
+ unsigned char xmitcsum;
+ unsigned char ch;
+ int count, i, ret, error;
+
+ for (;;) {
+ /* wait around for the start character, ignore all other characters */
+ do {
+ gdbstub_rx_char(&ch, 0);
+ } while (ch != '$');
+
+ checksum = 0;
+ xmitcsum = -1;
+ count = 0;
+ error = 0;
+
+ /* now, read until a # or end of buffer is found */
+ while (count < BUFMAX) {
+ ret = gdbstub_rx_char(&ch, 0);
+ if (ret < 0)
+ error = ret;
+
+ if (ch == '#')
+ break;
+ checksum += ch;
+ buffer[count] = ch;
+ count++;
+ }
+
+ if (error == -EIO) {
+ gdbstub_proto("### GDB Rx Error - Skipping packet ###\n");
+ gdbstub_proto("### GDB Tx NAK\n");
+ gdbstub_tx_char('-');
+ continue;
+ }
+
+ if (count >= BUFMAX || error)
+ continue;
+
+ buffer[count] = 0;
+
+ /* read the checksum */
+ ret = gdbstub_rx_char(&ch, 0);
+ if (ret < 0)
+ error = ret;
+ xmitcsum = hex(ch) << 4;
+
+ ret = gdbstub_rx_char(&ch, 0);
+ if (ret < 0)
+ error = ret;
+ xmitcsum |= hex(ch);
+
+ if (error) {
+ if (error == -EIO)
+ gdbstub_proto("### GDB Rx Error - Skipping packet\n");
+ gdbstub_proto("### GDB Tx NAK\n");
+ gdbstub_tx_char('-');
+ continue;
+ }
+
+ /* check the checksum */
+ if (checksum != xmitcsum) {
+ gdbstub_proto("### GDB Tx NAK\n");
+ gdbstub_tx_char('-'); /* failed checksum */
+ continue;
+ }
+
+ gdbstub_proto("### GDB Rx '$%s#%02x' ###\n", buffer, checksum);
+ gdbstub_proto("### GDB Tx ACK\n");
+ gdbstub_tx_char('+'); /* successful transfer */
+
+ /* if a sequence char is present, reply the sequence ID */
+ if (buffer[2] == ':') {
+ gdbstub_tx_char(buffer[0]);
+ gdbstub_tx_char(buffer[1]);
+
+ /* remove sequence chars from buffer */
+ count = 0;
+ while (buffer[count]) count++;
+ for (i=3; i <= count; i++)
+ buffer[i - 3] = buffer[i];
+ }
+
+ break;
+ }
+} /* end gdbstub_recv_packet() */
+
+/*****************************************************************************/
+/*
+ * send the packet in buffer.
+ * - return 0 if successfully ACK'd
+ * - return 1 if abandoned due to new incoming packet
+ */
+static int gdbstub_send_packet(char *buffer)
+{
+ unsigned char checksum;
+ int count;
+ unsigned char ch;
+
+ /* $<packet info>#<checksum> */
+ gdbstub_proto("### GDB Tx '%s' ###\n", buffer);
+
+ do {
+ gdbstub_tx_char('$');
+ checksum = 0;
+ count = 0;
+
+ while ((ch = buffer[count]) != 0) {
+ gdbstub_tx_char(ch);
+ checksum += ch;
+ count += 1;
+ }
+
+ gdbstub_tx_char('#');
+ gdbstub_tx_char(hex_asc_hi(checksum));
+ gdbstub_tx_char(hex_asc_lo(checksum));
+
+ } while (gdbstub_rx_char(&ch,0),
+#ifdef GDBSTUB_DEBUG_PROTOCOL
+ ch=='-' && (gdbstub_proto("### GDB Rx NAK\n"),0),
+ ch!='-' && ch!='+' && (gdbstub_proto("### GDB Rx ??? %02x\n",ch),0),
+#endif
+ ch!='+' && ch!='$');
+
+ if (ch=='+') {
+ gdbstub_proto("### GDB Rx ACK\n");
+ return 0;
+ }
+
+ gdbstub_proto("### GDB Tx Abandoned\n");
+ gdbstub_rx_unget = ch;
+ return 1;
+} /* end gdbstub_send_packet() */
+
+/*
+ * While we find nice hex chars, build an int.
+ * Return number of chars processed.
+ */
+static int hexToInt(char **ptr, unsigned long *_value)
+{
+ int count = 0, ch;
+
+ *_value = 0;
+ while (**ptr) {
+ ch = hex(**ptr);
+ if (ch < 0)
+ break;
+
+ *_value = (*_value << 4) | ((uint8_t) ch & 0xf);
+ count++;
+
+ (*ptr)++;
+ }
+
+ return count;
+}
+
+/*****************************************************************************/
+/*
+ * probe an address to see whether it maps to anything
+ */
+static inline int gdbstub_addr_probe(const void *vaddr)
+{
+#ifdef CONFIG_MMU
+ unsigned long paddr;
+
+ asm("lrad %1,%0,#1,#0,#0" : "=r"(paddr) : "r"(vaddr));
+ if (!(paddr & xAMPRx_V))
+ return 0;
+#endif
+
+ return 1;
+} /* end gdbstub_addr_probe() */
+
+#ifdef CONFIG_MMU
+static unsigned long __saved_dampr, __saved_damlr;
+
+static inline unsigned long gdbstub_virt_to_pte(unsigned long vaddr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long val, dampr5;
+
+ pgd = (pgd_t *) __get_DAMLR(3) + pgd_index(vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
+
+ if (pmd_bad(*pmd) || !pmd_present(*pmd))
+ return 0;
+
+ /* make sure dampr5 maps to the correct pmd */
+ dampr5 = __get_DAMPR(5);
+ val = pmd_val(*pmd);
+ __set_DAMPR(5, val | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V);
+
+ /* now it's safe to access the pmd */
+ pte = (pte_t *)__get_DAMLR(5) + __pte_index(vaddr);
+ if (pte_present(*pte))
+ val = pte_val(*pte);
+ else
+ val = 0;
+
+ /* restore original dampr5 */
+ __set_DAMPR(5, dampr5);
+
+ return val;
+}
+#endif
+
+static inline int gdbstub_addr_map(const void *vaddr)
+{
+#ifdef CONFIG_MMU
+ unsigned long pte;
+
+ __saved_dampr = __get_DAMPR(2);
+ __saved_damlr = __get_DAMLR(2);
+#endif
+ if (gdbstub_addr_probe(vaddr))
+ return 1;
+#ifdef CONFIG_MMU
+ pte = gdbstub_virt_to_pte((unsigned long) vaddr);
+ if (pte) {
+ __set_DAMPR(2, pte);
+ __set_DAMLR(2, (unsigned long) vaddr & PAGE_MASK);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static inline void gdbstub_addr_unmap(void)
+{
+#ifdef CONFIG_MMU
+ __set_DAMPR(2, __saved_dampr);
+ __set_DAMLR(2, __saved_damlr);
+#endif
+}
+
+/*
+ * access potentially dodgy memory through a potentially dodgy pointer
+ */
+static inline int gdbstub_read_dword(const void *addr, uint32_t *_res)
+{
+ unsigned long brr;
+ uint32_t res;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " ld%I2 %M2,%0 \n"
+ " movsg brr,%1 \n"
+ : "=r"(res), "=r"(brr)
+ : "m"(*(uint32_t *) addr));
+ *_res = res;
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static inline int gdbstub_write_dword(void *addr, uint32_t val)
+{
+ unsigned long brr;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " st%I2 %1,%M2 \n"
+ " movsg brr,%0 \n"
+ : "=r"(brr)
+ : "r"(val), "m"(*(uint32_t *) addr));
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static inline int gdbstub_read_word(const void *addr, uint16_t *_res)
+{
+ unsigned long brr;
+ uint16_t res;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " lduh%I2 %M2,%0 \n"
+ " movsg brr,%1 \n"
+ : "=r"(res), "=r"(brr)
+ : "m"(*(uint16_t *) addr));
+ *_res = res;
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static inline int gdbstub_write_word(void *addr, uint16_t val)
+{
+ unsigned long brr;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " sth%I2 %1,%M2 \n"
+ " movsg brr,%0 \n"
+ : "=r"(brr)
+ : "r"(val), "m"(*(uint16_t *) addr));
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static inline int gdbstub_read_byte(const void *addr, uint8_t *_res)
+{
+ unsigned long brr;
+ uint8_t res;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " ldub%I2 %M2,%0 \n"
+ " movsg brr,%1 \n"
+ : "=r"(res), "=r"(brr)
+ : "m"(*(uint8_t *) addr));
+ *_res = res;
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static inline int gdbstub_write_byte(void *addr, uint8_t val)
+{
+ unsigned long brr;
+
+ if (!gdbstub_addr_map(addr))
+ return 0;
+
+ asm volatile(" movgs gr0,brr \n"
+ " stb%I2 %1,%M2 \n"
+ " movsg brr,%0 \n"
+ : "=r"(brr)
+ : "r"(val), "m"(*(uint8_t *) addr));
+ gdbstub_addr_unmap();
+ return likely(!brr);
+}
+
+static void __gdbstub_console_write(struct console *co, const char *p, unsigned n)
+{
+ char outbuf[26];
+ int qty;
+
+ outbuf[0] = 'O';
+
+ while (n > 0) {
+ qty = 1;
+
+ while (n > 0 && qty < 20) {
+ mem2hex(p, outbuf + qty, 2, 0);
+ qty += 2;
+ if (*p == 0x0a) {
+ outbuf[qty++] = '0';
+ outbuf[qty++] = 'd';
+ }
+ p++;
+ n--;
+ }
+
+ outbuf[qty] = 0;
+ gdbstub_send_packet(outbuf);
+ }
+}
+
+#if 0
+void debug_to_serial(const char *p, int n)
+{
+ gdbstub_console_write(NULL,p,n);
+}
+#endif
+
+#ifdef CONFIG_GDB_CONSOLE
+
+static struct console gdbstub_console = {
+ .name = "gdb",
+ .write = gdbstub_console_write, /* in break.S */
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+#endif
+
+/*****************************************************************************/
+/*
+ * Convert the memory pointed to by mem into hex, placing result in buf.
+ * - if successful, return a pointer to the last char put in buf (NUL)
+ * - in case of mem fault, return NULL
+ * may_fault is non-zero if we are reading from arbitrary memory, but is currently
+ * not used.
+ */
+static unsigned char *mem2hex(const void *_mem, char *buf, int count, int may_fault)
+{
+ const uint8_t *mem = _mem;
+ uint8_t ch[4] __attribute__((aligned(4)));
+
+ if ((uint32_t)mem&1 && count>=1) {
+ if (!gdbstub_read_byte(mem,ch))
+ return NULL;
+ buf = hex_byte_pack(buf, ch[0]);
+ mem++;
+ count--;
+ }
+
+ if ((uint32_t)mem&3 && count>=2) {
+ if (!gdbstub_read_word(mem,(uint16_t *)ch))
+ return NULL;
+ buf = hex_byte_pack(buf, ch[0]);
+ buf = hex_byte_pack(buf, ch[1]);
+ mem += 2;
+ count -= 2;
+ }
+
+ while (count>=4) {
+ if (!gdbstub_read_dword(mem,(uint32_t *)ch))
+ return NULL;
+ buf = hex_byte_pack(buf, ch[0]);
+ buf = hex_byte_pack(buf, ch[1]);
+ buf = hex_byte_pack(buf, ch[2]);
+ buf = hex_byte_pack(buf, ch[3]);
+ mem += 4;
+ count -= 4;
+ }
+
+ if (count>=2) {
+ if (!gdbstub_read_word(mem,(uint16_t *)ch))
+ return NULL;
+ buf = hex_byte_pack(buf, ch[0]);
+ buf = hex_byte_pack(buf, ch[1]);
+ mem += 2;
+ count -= 2;
+ }
+
+ if (count>=1) {
+ if (!gdbstub_read_byte(mem,ch))
+ return NULL;
+ buf = hex_byte_pack(buf, ch[0]);
+ }
+
+ *buf = 0;
+
+ return buf;
+} /* end mem2hex() */
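The alignment handling above (mirrored in hex2mem() below) keeps every access naturally aligned for the guarded byte/word/dword accessors. A short walk-through with an illustrative address:

/* Walk-through: mem2hex(mem, buf, 7, 0) with mem == 0x2001 (odd address)
 *
 *	0x2001 is odd	   -> gdbstub_read_byte()   1 byte,  6 remaining
 *	0x2002 is 2 mod 4  -> gdbstub_read_word()   2 bytes, 4 remaining
 *	0x2004 is aligned  -> gdbstub_read_dword()  4 bytes, 0 remaining
 *
 * Each byte is emitted as two hex digits via hex_byte_pack(), so the caller
 * gets back 14 hex characters plus the terminating NUL.
 */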
+
+/*****************************************************************************/
+/*
+ * convert the hex array pointed to by buf into binary to be placed in mem
+ * return a pointer to the character AFTER the last byte of buffer consumed
+ */
+static char *hex2mem(const char *buf, void *_mem, int count)
+{
+ uint8_t *mem = _mem;
+ union {
+ uint32_t l;
+ uint16_t w;
+ uint8_t b[4];
+ } ch;
+
+ if ((u32)mem&1 && count>=1) {
+ ch.b[0] = hex(*buf++) << 4;
+ ch.b[0] |= hex(*buf++);
+ if (!gdbstub_write_byte(mem,ch.b[0]))
+ return NULL;
+ mem++;
+ count--;
+ }
+
+ if ((u32)mem&3 && count>=2) {
+ ch.b[0] = hex(*buf++) << 4;
+ ch.b[0] |= hex(*buf++);
+ ch.b[1] = hex(*buf++) << 4;
+ ch.b[1] |= hex(*buf++);
+ if (!gdbstub_write_word(mem,ch.w))
+ return NULL;
+ mem += 2;
+ count -= 2;
+ }
+
+ while (count>=4) {
+ ch.b[0] = hex(*buf++) << 4;
+ ch.b[0] |= hex(*buf++);
+ ch.b[1] = hex(*buf++) << 4;
+ ch.b[1] |= hex(*buf++);
+ ch.b[2] = hex(*buf++) << 4;
+ ch.b[2] |= hex(*buf++);
+ ch.b[3] = hex(*buf++) << 4;
+ ch.b[3] |= hex(*buf++);
+ if (!gdbstub_write_dword(mem,ch.l))
+ return NULL;
+ mem += 4;
+ count -= 4;
+ }
+
+ if (count>=2) {
+ ch.b[0] = hex(*buf++) << 4;
+ ch.b[0] |= hex(*buf++);
+ ch.b[1] = hex(*buf++) << 4;
+ ch.b[1] |= hex(*buf++);
+ if (!gdbstub_write_word(mem,ch.w))
+ return NULL;
+ mem += 2;
+ count -= 2;
+ }
+
+ if (count>=1) {
+ ch.b[0] = hex(*buf++) << 4;
+ ch.b[0] |= hex(*buf++);
+ if (!gdbstub_write_byte(mem,ch.b[0]))
+ return NULL;
+ }
+
+ return (char *) buf;
+} /* end hex2mem() */
+
+/*****************************************************************************/
+/*
+ * This table contains the mapping between FRV TBR.TT exception codes,
+ * and signals, which are primarily what GDB understands. It also
+ * indicates which hardware traps we need to commandeer when
+ * initializing the stub.
+ */
+static const struct brr_to_sig_map {
+ unsigned long brr_mask; /* BRR bitmask */
+ unsigned long tbr_tt; /* TBR.TT code (in BRR.EBTT) */
+ unsigned int signo; /* Signal that we map this into */
+} brr_to_sig_map[] = {
+ { BRR_EB, TBR_TT_INSTR_ACC_ERROR, SIGSEGV },
+ { BRR_EB, TBR_TT_ILLEGAL_INSTR, SIGILL },
+ { BRR_EB, TBR_TT_PRIV_INSTR, SIGILL },
+ { BRR_EB, TBR_TT_MP_EXCEPTION, SIGFPE },
+ { BRR_EB, TBR_TT_DATA_ACC_ERROR, SIGSEGV },
+ { BRR_EB, TBR_TT_DATA_STR_ERROR, SIGSEGV },
+ { BRR_EB, TBR_TT_DIVISION_EXCEP, SIGFPE },
+ { BRR_EB, TBR_TT_COMPOUND_EXCEP, SIGSEGV },
+ { BRR_EB, TBR_TT_INTERRUPT_13, SIGALRM }, /* watchdog */
+ { BRR_EB, TBR_TT_INTERRUPT_14, SIGINT }, /* GDB serial */
+ { BRR_EB, TBR_TT_INTERRUPT_15, SIGQUIT }, /* NMI */
+ { BRR_CB, 0, SIGUSR1 },
+ { BRR_TB, 0, SIGUSR2 },
+ { BRR_DBNEx, 0, SIGTRAP },
+ { BRR_DBx, 0, SIGTRAP }, /* h/w watchpoint */
+ { BRR_IBx, 0, SIGTRAP }, /* h/w breakpoint */
+ { BRR_CBB, 0, SIGTRAP },
+ { BRR_SB, 0, SIGTRAP },
+ { BRR_ST, 0, SIGTRAP }, /* single step */
+ { 0, 0, SIGHUP } /* default */
+};
+
+/*****************************************************************************/
+/*
+ * convert the FRV BRR register contents into a UNIX signal number
+ */
+static inline int gdbstub_compute_signal(unsigned long brr)
+{
+ const struct brr_to_sig_map *map;
+ unsigned long tbr = (brr & BRR_EBTT) >> 12;
+
+ for (map = brr_to_sig_map; map->brr_mask; map++)
+ if (map->brr_mask & brr)
+ if (!map->tbr_tt || map->tbr_tt == tbr)
+ break;
+
+ return map->signo;
+} /* end gdbstub_compute_signal() */
+
+/*****************************************************************************/
+/*
+ * set a software breakpoint or a hardware breakpoint or watchpoint
+ */
+static int gdbstub_set_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
+{
+ unsigned long tmp;
+ int bkpt, loop, xloop;
+
+ union {
+ struct {
+ unsigned long mask0, mask1;
+ };
+ uint8_t bytes[8];
+ } dbmr;
+
+ //gdbstub_printk("setbkpt(%ld,%08lx,%ld)\n", type, addr, len);
+
+ switch (type) {
+ /* set software breakpoint */
+ case 0:
+ if (addr & 3 || len > 7*4)
+ return -EINVAL;
+
+ for (bkpt = 255; bkpt >= 0; bkpt--)
+ if (!gdbstub_bkpts[bkpt].addr)
+ break;
+ if (bkpt < 0)
+ return -ENOSPC;
+
+ for (loop = 0; loop < len/4; loop++)
+ if (!gdbstub_read_dword(&((uint32_t *) addr)[loop],
+ &gdbstub_bkpts[bkpt].originsns[loop]))
+ return -EFAULT;
+
+ for (loop = 0; loop < len/4; loop++)
+ if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
+ BREAK_INSN)
+ ) {
+ /* need to undo the changes if possible */
+ for (xloop = 0; xloop < loop; xloop++)
+ gdbstub_write_dword(&((uint32_t *) addr)[xloop],
+ gdbstub_bkpts[bkpt].originsns[xloop]);
+ return -EFAULT;
+ }
+
+ gdbstub_bkpts[bkpt].addr = addr;
+ gdbstub_bkpts[bkpt].len = len;
+
+#if 0
+ gdbstub_printk("Set BKPT[%02x]: %08lx #%d {%04x, %04x} -> { %04x, %04x }\n",
+ bkpt,
+ gdbstub_bkpts[bkpt].addr,
+ gdbstub_bkpts[bkpt].len,
+ gdbstub_bkpts[bkpt].originsns[0],
+ gdbstub_bkpts[bkpt].originsns[1],
+ ((uint32_t *) addr)[0],
+ ((uint32_t *) addr)[1]
+ );
+#endif
+ return 0;
+
+ /* set hardware breakpoint */
+ case 1:
+ if (addr & 3 || len != 4)
+ return -EINVAL;
+
+ if (!(__debug_regs->dcr & DCR_IBE0)) {
+ //gdbstub_printk("set h/w break 0: %08lx\n", addr);
+ __debug_regs->dcr |= DCR_IBE0;
+ __debug_regs->ibar[0] = addr;
+ asm volatile("movgs %0,ibar0" : : "r"(addr));
+ return 0;
+ }
+
+ if (!(__debug_regs->dcr & DCR_IBE1)) {
+ //gdbstub_printk("set h/w break 1: %08lx\n", addr);
+ __debug_regs->dcr |= DCR_IBE1;
+ __debug_regs->ibar[1] = addr;
+ asm volatile("movgs %0,ibar1" : : "r"(addr));
+ return 0;
+ }
+
+ if (!(__debug_regs->dcr & DCR_IBE2)) {
+ //gdbstub_printk("set h/w break 2: %08lx\n", addr);
+ __debug_regs->dcr |= DCR_IBE2;
+ __debug_regs->ibar[2] = addr;
+ asm volatile("movgs %0,ibar2" : : "r"(addr));
+ return 0;
+ }
+
+ if (!(__debug_regs->dcr & DCR_IBE3)) {
+ //gdbstub_printk("set h/w break 3: %08lx\n", addr);
+ __debug_regs->dcr |= DCR_IBE3;
+ __debug_regs->ibar[3] = addr;
+ asm volatile("movgs %0,ibar3" : : "r"(addr));
+ return 0;
+ }
+
+ return -ENOSPC;
+
+ /* set data read/write/access watchpoint */
+ case 2:
+ case 3:
+ case 4:
+ if ((addr & ~7) != ((addr + len - 1) & ~7))
+ return -EINVAL;
+
+ tmp = addr & 7;
+
+ memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
+ for (loop = 0; loop < len; loop++)
+ dbmr.bytes[tmp + loop] = 0;
+
+ addr &= ~7;
+
+ if (!(__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0))) {
+ //gdbstub_printk("set h/w watchpoint 0 type %ld: %08lx\n", type, addr);
+ tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;
+
+ __debug_regs->dcr |= tmp;
+ __debug_regs->dbar[0] = addr;
+ __debug_regs->dbmr[0][0] = dbmr.mask0;
+ __debug_regs->dbmr[0][1] = dbmr.mask1;
+ __debug_regs->dbdr[0][0] = 0;
+ __debug_regs->dbdr[0][1] = 0;
+
+ asm volatile(" movgs %0,dbar0 \n"
+ " movgs %1,dbmr00 \n"
+ " movgs %2,dbmr01 \n"
+ " movgs gr0,dbdr00 \n"
+ " movgs gr0,dbdr01 \n"
+ : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
+ return 0;
+ }
+
+ if (!(__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1))) {
+ //gdbstub_printk("set h/w watchpoint 1 type %ld: %08lx\n", type, addr);
+ tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;
+
+ __debug_regs->dcr |= tmp;
+ __debug_regs->dbar[1] = addr;
+ __debug_regs->dbmr[1][0] = dbmr.mask0;
+ __debug_regs->dbmr[1][1] = dbmr.mask1;
+ __debug_regs->dbdr[1][0] = 0;
+ __debug_regs->dbdr[1][1] = 0;
+
+ asm volatile(" movgs %0,dbar1 \n"
+ " movgs %1,dbmr10 \n"
+ " movgs %2,dbmr11 \n"
+ " movgs gr0,dbdr10 \n"
+ " movgs gr0,dbdr11 \n"
+ : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
+ return 0;
+ }
+
+ return -ENOSPC;
+
+ default:
+ return -EINVAL;
+ }
+
+} /* end gdbstub_set_breakpoint() */
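For the watchpoint cases, the DBMR mask selects which bytes of the 8-byte-aligned window take part in the match: the watched bytes are zeroed, everything else stays 0xff. A worked example with illustrative values:

/* Worked example: a 2-byte write watchpoint (type 2) at address 0x1003.
 *
 *	(0x1003 & ~7) == ((0x1003 + 2 - 1) & ~7) == 0x1000  -> range accepted
 *	tmp = 0x1003 & 7 = 3
 *	dbmr.bytes[] = ff ff ff 00 00 ff ff ff   (bytes 3 and 4 are watched)
 *	addr &= ~7 gives 0x1000, which is what DBARx is programmed with
 *
 * DCR_DWBE0 (or DCR_DWBE1 for the second channel) is then set so that only
 * data writes inside that window raise the break.
 */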
+
+/*****************************************************************************/
+/*
+ * clear a breakpoint or watchpoint
+ */
+int gdbstub_clear_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
+{
+ unsigned long tmp;
+ int bkpt, loop;
+
+ union {
+ struct {
+ unsigned long mask0, mask1;
+ };
+ uint8_t bytes[8];
+ } dbmr;
+
+ //gdbstub_printk("clearbkpt(%ld,%08lx,%ld)\n", type, addr, len);
+
+ switch (type) {
+ /* clear software breakpoint */
+ case 0:
+ for (bkpt = 255; bkpt >= 0; bkpt--)
+ if (gdbstub_bkpts[bkpt].addr == addr && gdbstub_bkpts[bkpt].len == len)
+ break;
+ if (bkpt < 0)
+ return -ENOENT;
+
+ gdbstub_bkpts[bkpt].addr = 0;
+
+ for (loop = 0; loop < len/4; loop++)
+ if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
+ gdbstub_bkpts[bkpt].originsns[loop]))
+ return -EFAULT;
+ return 0;
+
+ /* clear hardware breakpoint */
+ case 1:
+ if (addr & 3 || len != 4)
+ return -EINVAL;
+
+#define __get_ibar(X) ({ unsigned long x; asm volatile("movsg ibar"#X",%0" : "=r"(x)); x; })
+
+ if (__debug_regs->dcr & DCR_IBE0 && __get_ibar(0) == addr) {
+ //gdbstub_printk("clear h/w break 0: %08lx\n", addr);
+ __debug_regs->dcr &= ~DCR_IBE0;
+ __debug_regs->ibar[0] = 0;
+ asm volatile("movgs gr0,ibar0");
+ return 0;
+ }
+
+ if (__debug_regs->dcr & DCR_IBE1 && __get_ibar(1) == addr) {
+ //gdbstub_printk("clear h/w break 1: %08lx\n", addr);
+ __debug_regs->dcr &= ~DCR_IBE1;
+ __debug_regs->ibar[1] = 0;
+ asm volatile("movgs gr0,ibar1");
+ return 0;
+ }
+
+ if (__debug_regs->dcr & DCR_IBE2 && __get_ibar(2) == addr) {
+ //gdbstub_printk("clear h/w break 2: %08lx\n", addr);
+ __debug_regs->dcr &= ~DCR_IBE2;
+ __debug_regs->ibar[2] = 0;
+ asm volatile("movgs gr0,ibar2");
+ return 0;
+ }
+
+ if (__debug_regs->dcr & DCR_IBE3 && __get_ibar(3) == addr) {
+ //gdbstub_printk("clear h/w break 3: %08lx\n", addr);
+ __debug_regs->dcr &= ~DCR_IBE3;
+ __debug_regs->ibar[3] = 0;
+ asm volatile("movgs gr0,ibar3");
+ return 0;
+ }
+
+ return -EINVAL;
+
+ /* clear data read/write/access watchpoint */
+ case 2:
+ case 3:
+ case 4:
+ if ((addr & ~7) != ((addr + len - 1) & ~7))
+ return -EINVAL;
+
+ tmp = addr & 7;
+
+ memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
+ for (loop = 0; loop < len; loop++)
+ dbmr.bytes[tmp + loop] = 0;
+
+ addr &= ~7;
+
+#define __get_dbar(X) ({ unsigned long x; asm volatile("movsg dbar"#X",%0" : "=r"(x)); x; })
+#define __get_dbmr0(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"0,%0" : "=r"(x)); x; })
+#define __get_dbmr1(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"1,%0" : "=r"(x)); x; })
+
+ /* consider DBAR 0 */
+ tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;
+
+ if ((__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0)) != tmp ||
+ __get_dbar(0) != addr ||
+ __get_dbmr0(0) != dbmr.mask0 ||
+ __get_dbmr1(0) != dbmr.mask1)
+ goto skip_dbar0;
+
+ //gdbstub_printk("clear h/w watchpoint 0 type %ld: %08lx\n", type, addr);
+ __debug_regs->dcr &= ~(DCR_DRBE0|DCR_DWBE0);
+ __debug_regs->dbar[0] = 0;
+ __debug_regs->dbmr[0][0] = 0;
+ __debug_regs->dbmr[0][1] = 0;
+ __debug_regs->dbdr[0][0] = 0;
+ __debug_regs->dbdr[0][1] = 0;
+
+ asm volatile(" movgs gr0,dbar0 \n"
+ " movgs gr0,dbmr00 \n"
+ " movgs gr0,dbmr01 \n"
+ " movgs gr0,dbdr00 \n"
+ " movgs gr0,dbdr01 \n");
+ return 0;
+
+ skip_dbar0:
+ /* consider DBAR 1 */
+ tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;
+
+ if ((__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1)) != tmp ||
+ __get_dbar(1) != addr ||
+ __get_dbmr0(1) != dbmr.mask0 ||
+ __get_dbmr1(1) != dbmr.mask1)
+ goto skip_dbar1;
+
+ //gdbstub_printk("clear h/w watchpoint 1 type %ld: %08lx\n", type, addr);
+ __debug_regs->dcr &= ~(DCR_DRBE1|DCR_DWBE1);
+ __debug_regs->dbar[1] = 0;
+ __debug_regs->dbmr[1][0] = 0;
+ __debug_regs->dbmr[1][1] = 0;
+ __debug_regs->dbdr[1][0] = 0;
+ __debug_regs->dbdr[1][1] = 0;
+
+ asm volatile(" movgs gr0,dbar1 \n"
+ " movgs gr0,dbmr10 \n"
+ " movgs gr0,dbmr11 \n"
+ " movgs gr0,dbdr10 \n"
+ " movgs gr0,dbdr11 \n");
+ return 0;
+
+ skip_dbar1:
+ return -ENOSPC;
+
+ default:
+ return -EINVAL;
+ }
+} /* end gdbstub_clear_breakpoint() */
+
+/*****************************************************************************/
+/*
+ * check for an internal software breakpoint, and wind the PC back if necessary
+ */
+static void gdbstub_check_breakpoint(void)
+{
+ unsigned long addr = __debug_frame->pc - 4;
+ int bkpt;
+
+ for (bkpt = 255; bkpt >= 0; bkpt--)
+ if (gdbstub_bkpts[bkpt].addr == addr)
+ break;
+ if (bkpt >= 0)
+ __debug_frame->pc = addr;
+
+ //gdbstub_printk("alter pc [%d] %08lx\n", bkpt, __debug_frame->pc);
+
+} /* end gdbstub_check_breakpoint() */
+
+/*****************************************************************************/
+/*
+ * show the saved register frame
+ */
+static void __maybe_unused gdbstub_show_regs(void)
+{
+ unsigned long *reg;
+ int loop;
+
+ gdbstub_printk("\n");
+
+ gdbstub_printk("Frame: @%p [%s]\n",
+ __debug_frame,
+ __debug_frame->psr & PSR_S ? "kernel" : "user");
+
+ reg = (unsigned long *) __debug_frame;
+ for (loop = 0; loop < NR_PT_REGS; loop++) {
+ printk("%s %08lx", regnames[loop + 0], reg[loop + 0]);
+
+ if (loop == NR_PT_REGS - 1 || loop % 5 == 4)
+ printk("\n");
+ else
+ printk(" | ");
+ }
+
+ gdbstub_printk("Process %s (pid: %d)\n", current->comm, current->pid);
+} /* end gdbstub_show_regs() */
+
+/*****************************************************************************/
+/*
+ * dump debugging regs
+ */
+static void __maybe_unused gdbstub_dump_debugregs(void)
+{
+ gdbstub_printk("DCR %08lx ", __debug_status.dcr);
+ gdbstub_printk("BRR %08lx\n", __debug_status.brr);
+
+ gdbstub_printk("IBAR0 %08lx ", __get_ibar(0));
+ gdbstub_printk("IBAR1 %08lx ", __get_ibar(1));
+ gdbstub_printk("IBAR2 %08lx ", __get_ibar(2));
+ gdbstub_printk("IBAR3 %08lx\n", __get_ibar(3));
+
+ gdbstub_printk("DBAR0 %08lx ", __get_dbar(0));
+ gdbstub_printk("DBMR00 %08lx ", __get_dbmr0(0));
+ gdbstub_printk("DBMR01 %08lx\n", __get_dbmr1(0));
+
+ gdbstub_printk("DBAR1 %08lx ", __get_dbar(1));
+ gdbstub_printk("DBMR10 %08lx ", __get_dbmr0(1));
+ gdbstub_printk("DBMR11 %08lx\n", __get_dbmr1(1));
+
+ gdbstub_printk("\n");
+} /* end gdbstub_dump_debugregs() */
+
+/*****************************************************************************/
+/*
+ * dump the MMU state into a structure so that it can be accessed with GDB
+ */
+void gdbstub_get_mmu_state(void)
+{
+ asm volatile("movsg hsr0,%0" : "=r"(__debug_mmu.regs.hsr0));
+ asm volatile("movsg pcsr,%0" : "=r"(__debug_mmu.regs.pcsr));
+ asm volatile("movsg esr0,%0" : "=r"(__debug_mmu.regs.esr0));
+ asm volatile("movsg ear0,%0" : "=r"(__debug_mmu.regs.ear0));
+ asm volatile("movsg epcr0,%0" : "=r"(__debug_mmu.regs.epcr0));
+
+ /* read the protection / SAT registers */
+ __debug_mmu.iamr[0].L = __get_IAMLR(0);
+ __debug_mmu.iamr[0].P = __get_IAMPR(0);
+ __debug_mmu.iamr[1].L = __get_IAMLR(1);
+ __debug_mmu.iamr[1].P = __get_IAMPR(1);
+ __debug_mmu.iamr[2].L = __get_IAMLR(2);
+ __debug_mmu.iamr[2].P = __get_IAMPR(2);
+ __debug_mmu.iamr[3].L = __get_IAMLR(3);
+ __debug_mmu.iamr[3].P = __get_IAMPR(3);
+ __debug_mmu.iamr[4].L = __get_IAMLR(4);
+ __debug_mmu.iamr[4].P = __get_IAMPR(4);
+ __debug_mmu.iamr[5].L = __get_IAMLR(5);
+ __debug_mmu.iamr[5].P = __get_IAMPR(5);
+ __debug_mmu.iamr[6].L = __get_IAMLR(6);
+ __debug_mmu.iamr[6].P = __get_IAMPR(6);
+ __debug_mmu.iamr[7].L = __get_IAMLR(7);
+ __debug_mmu.iamr[7].P = __get_IAMPR(7);
+ __debug_mmu.iamr[8].L = __get_IAMLR(8);
+ __debug_mmu.iamr[8].P = __get_IAMPR(8);
+ __debug_mmu.iamr[9].L = __get_IAMLR(9);
+ __debug_mmu.iamr[9].P = __get_IAMPR(9);
+ __debug_mmu.iamr[10].L = __get_IAMLR(10);
+ __debug_mmu.iamr[10].P = __get_IAMPR(10);
+ __debug_mmu.iamr[11].L = __get_IAMLR(11);
+ __debug_mmu.iamr[11].P = __get_IAMPR(11);
+ __debug_mmu.iamr[12].L = __get_IAMLR(12);
+ __debug_mmu.iamr[12].P = __get_IAMPR(12);
+ __debug_mmu.iamr[13].L = __get_IAMLR(13);
+ __debug_mmu.iamr[13].P = __get_IAMPR(13);
+ __debug_mmu.iamr[14].L = __get_IAMLR(14);
+ __debug_mmu.iamr[14].P = __get_IAMPR(14);
+ __debug_mmu.iamr[15].L = __get_IAMLR(15);
+ __debug_mmu.iamr[15].P = __get_IAMPR(15);
+
+ __debug_mmu.damr[0].L = __get_DAMLR(0);
+ __debug_mmu.damr[0].P = __get_DAMPR(0);
+ __debug_mmu.damr[1].L = __get_DAMLR(1);
+ __debug_mmu.damr[1].P = __get_DAMPR(1);
+ __debug_mmu.damr[2].L = __get_DAMLR(2);
+ __debug_mmu.damr[2].P = __get_DAMPR(2);
+ __debug_mmu.damr[3].L = __get_DAMLR(3);
+ __debug_mmu.damr[3].P = __get_DAMPR(3);
+ __debug_mmu.damr[4].L = __get_DAMLR(4);
+ __debug_mmu.damr[4].P = __get_DAMPR(4);
+ __debug_mmu.damr[5].L = __get_DAMLR(5);
+ __debug_mmu.damr[5].P = __get_DAMPR(5);
+ __debug_mmu.damr[6].L = __get_DAMLR(6);
+ __debug_mmu.damr[6].P = __get_DAMPR(6);
+ __debug_mmu.damr[7].L = __get_DAMLR(7);
+ __debug_mmu.damr[7].P = __get_DAMPR(7);
+ __debug_mmu.damr[8].L = __get_DAMLR(8);
+ __debug_mmu.damr[8].P = __get_DAMPR(8);
+ __debug_mmu.damr[9].L = __get_DAMLR(9);
+ __debug_mmu.damr[9].P = __get_DAMPR(9);
+ __debug_mmu.damr[10].L = __get_DAMLR(10);
+ __debug_mmu.damr[10].P = __get_DAMPR(10);
+ __debug_mmu.damr[11].L = __get_DAMLR(11);
+ __debug_mmu.damr[11].P = __get_DAMPR(11);
+ __debug_mmu.damr[12].L = __get_DAMLR(12);
+ __debug_mmu.damr[12].P = __get_DAMPR(12);
+ __debug_mmu.damr[13].L = __get_DAMLR(13);
+ __debug_mmu.damr[13].P = __get_DAMPR(13);
+ __debug_mmu.damr[14].L = __get_DAMLR(14);
+ __debug_mmu.damr[14].P = __get_DAMPR(14);
+ __debug_mmu.damr[15].L = __get_DAMLR(15);
+ __debug_mmu.damr[15].P = __get_DAMPR(15);
+
+#ifdef CONFIG_MMU
+ do {
+ /* read the DAT entries from the TLB */
+ struct __debug_amr *p;
+ int loop;
+
+ asm volatile("movsg tplr,%0" : "=r"(__debug_mmu.regs.tplr));
+ asm volatile("movsg tppr,%0" : "=r"(__debug_mmu.regs.tppr));
+ asm volatile("movsg tpxr,%0" : "=r"(__debug_mmu.regs.tpxr));
+ asm volatile("movsg cxnr,%0" : "=r"(__debug_mmu.regs.cxnr));
+
+ p = __debug_mmu.tlb;
+
+ /* way 0 */
+ asm volatile("movgs %0,tpxr" :: "r"(0 << TPXR_WAY_SHIFT));
+ for (loop = 0; loop < 64; loop++) {
+ asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
+ asm volatile("movsg tplr,%0" : "=r"(p->L));
+ asm volatile("movsg tppr,%0" : "=r"(p->P));
+ p++;
+ }
+
+ /* way 1 */
+ asm volatile("movgs %0,tpxr" :: "r"(1 << TPXR_WAY_SHIFT));
+ for (loop = 0; loop < 64; loop++) {
+ asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
+ asm volatile("movsg tplr,%0" : "=r"(p->L));
+ asm volatile("movsg tppr,%0" : "=r"(p->P));
+ p++;
+ }
+
+ asm volatile("movgs %0,tplr" :: "r"(__debug_mmu.regs.tplr));
+ asm volatile("movgs %0,tppr" :: "r"(__debug_mmu.regs.tppr));
+ asm volatile("movgs %0,tpxr" :: "r"(__debug_mmu.regs.tpxr));
+ } while(0);
+#endif
+
+} /* end gdbstub_get_mmu_state() */
+
+/*
+ * handle general query commands of the form 'qXXXXX'
+ */
+static void gdbstub_handle_query(void)
+{
+ if (strcmp(input_buffer, "qAttached") == 0) {
+ /* reply that we are attached to an existing process */
+ sprintf(output_buffer, "1");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qC") == 0) {
+ /* return current thread ID */
+ sprintf(output_buffer, "QC 0");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qOffsets") == 0) {
+ /* return relocation offset of text and data segments */
+ sprintf(output_buffer, "Text=0;Data=0;Bss=0");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qSymbol::") == 0) {
+ sprintf(output_buffer, "OK");
+ return;
+ }
+
+ if (strcmp(input_buffer, "qSupported") == 0) {
+ /* query of supported features */
+ sprintf(output_buffer, "PacketSize=%u;ReverseContinue-;ReverseStep-",
+ sizeof(input_buffer));
+ return;
+ }
+
+ gdbstub_strcpy(output_buffer,"E01");
+}
+
+/*****************************************************************************/
+/*
+ * handle event interception and GDB remote protocol processing
+ * - on entry:
+ * PSR.ET==0, PSR.S==1 and the CPU is in debug mode
+ * __debug_frame points to the saved registers
+ * __frame points to the kernel mode exception frame, if it was in kernel
+ * mode when the break happened
+ */
+void gdbstub(int sigval)
+{
+ unsigned long addr, length, loop, dbar, temp, temp2, temp3;
+ uint32_t zero;
+ char *ptr;
+ int flush_cache = 0;
+
+ LEDS(0x5000);
+
+ if (sigval < 0) {
+#ifndef CONFIG_GDBSTUB_IMMEDIATE
+ /* return immediately if GDB immediate activation option not set */
+ return;
+#else
+ sigval = SIGINT;
+#endif
+ }
+
+ save_user_regs(&__debug_frame0->uc);
+
+#if 0
+ gdbstub_printk("--> gdbstub() %08x %p %08x %08x\n",
+ __debug_frame->pc,
+ __debug_frame,
+ __debug_regs->brr,
+ __debug_regs->bpsr);
+// gdbstub_show_regs();
+#endif
+
+ LEDS(0x5001);
+
+ /* if we were interrupted by input on the serial gdbstub serial port,
+ * restore the context prior to the interrupt so that we return to that
+ * directly
+ */
+ temp = (unsigned long) __entry_kerneltrap_table;
+ temp2 = (unsigned long) __entry_usertrap_table;
+ temp3 = __debug_frame->pc & ~15;
+
+ if (temp3 == temp + TBR_TT_INTERRUPT_15 ||
+ temp3 == temp2 + TBR_TT_INTERRUPT_15
+ ) {
+ asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
+ __debug_frame->psr |= PSR_ET;
+ __debug_frame->psr &= ~PSR_S;
+ if (__debug_frame->psr & PSR_PS)
+ __debug_frame->psr |= PSR_S;
+ __debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
+ __debug_status.brr |= BRR_EB;
+ sigval = SIGINT;
+ }
+
+ /* handle the decrement timer going off (FR451 only) */
+ if (temp3 == temp + TBR_TT_DECREMENT_TIMER ||
+ temp3 == temp2 + TBR_TT_DECREMENT_TIMER
+ ) {
+ asm volatile("movgs %0,timerd" :: "r"(10000000));
+ asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
+ __debug_frame->psr |= PSR_ET;
+ __debug_frame->psr &= ~PSR_S;
+ if (__debug_frame->psr & PSR_PS)
+ __debug_frame->psr |= PSR_S;
+ __debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
+ __debug_status.brr |= BRR_EB;
+ sigval = SIGXCPU;
+ }
+
+ LEDS(0x5002);
+
+ /* after a BREAK insn, the PC lands on the far side of it */
+ if (__debug_status.brr & BRR_SB)
+ gdbstub_check_breakpoint();
+
+ LEDS(0x5003);
+
+ /* handle attempts to write console data via GDB "O" commands */
+ if (__debug_frame->pc == (unsigned long) gdbstub_console_write + 4) {
+ __gdbstub_console_write((struct console *) __debug_frame->gr8,
+ (const char *) __debug_frame->gr9,
+ (unsigned) __debug_frame->gr10);
+ goto done;
+ }
+
+ if (gdbstub_rx_unget) {
+ sigval = SIGINT;
+ goto packet_waiting;
+ }
+
+ if (!sigval)
+ sigval = gdbstub_compute_signal(__debug_status.brr);
+
+ LEDS(0x5004);
+
+ /* send a message to the debugger's user saying what happened if it may
+ * not be clear cut (we can't map exceptions onto signals properly)
+ */
+ if (sigval != SIGINT && sigval != SIGTRAP && sigval != SIGILL) {
+ static const char title[] = "Break ";
+ static const char crlf[] = "\r\n";
+ unsigned long brr = __debug_status.brr;
+ char hx;
+
+ ptr = output_buffer;
+ *ptr++ = 'O';
+ ptr = mem2hex(title, ptr, sizeof(title) - 1,0);
+
+ hx = hex_asc_hi(brr >> 24);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_lo(brr >> 24);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_hi(brr >> 16);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_lo(brr >> 16);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_hi(brr >> 8);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_lo(brr >> 8);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_hi(brr);
+ ptr = hex_byte_pack(ptr, hx);
+ hx = hex_asc_lo(brr);
+ ptr = hex_byte_pack(ptr, hx);
+
+ ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0);
+ *ptr = 0;
+ gdbstub_send_packet(output_buffer); /* send it off... */
+ }
+
+ LEDS(0x5005);
+
+ /* tell the debugger that an exception has occurred */
+ ptr = output_buffer;
+
+ /* Send trap type (converted to signal) */
+ *ptr++ = 'T';
+ ptr = hex_byte_pack(ptr, sigval);
+
+ /* Send Error PC */
+ ptr = hex_byte_pack(ptr, GDB_REG_PC);
+ *ptr++ = ':';
+ ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
+ *ptr++ = ';';
+
+ /*
+ * Send frame pointer
+ */
+ ptr = hex_byte_pack(ptr, GDB_REG_FP);
+ *ptr++ = ':';
+ ptr = mem2hex(&__debug_frame->fp, ptr, 4, 0);
+ *ptr++ = ';';
+
+ /*
+ * Send stack pointer
+ */
+ ptr = hex_byte_pack(ptr, GDB_REG_SP);
+ *ptr++ = ':';
+ ptr = mem2hex(&__debug_frame->sp, ptr, 4, 0);
+ *ptr++ = ';';
+
+ *ptr++ = 0;
+ gdbstub_send_packet(output_buffer); /* send it off... */
+
+ LEDS(0x5006);
+
+ packet_waiting:
+ gdbstub_get_mmu_state();
+
+ /* wait for input from remote GDB */
+ while (1) {
+ output_buffer[0] = 0;
+
+ LEDS(0x5007);
+ gdbstub_recv_packet(input_buffer);
+ LEDS(0x5600 | input_buffer[0]);
+
+ switch (input_buffer[0]) {
+ /* request repeat of last signal number */
+ case '?':
+ output_buffer[0] = 'S';
+ output_buffer[1] = hex_asc_hi(sigval);
+ output_buffer[2] = hex_asc_lo(sigval);
+ output_buffer[3] = 0;
+ break;
+
+ case 'd':
+ /* toggle debug flag */
+ break;
+
+ /* return the value of the CPU registers
+ * - GR0, GR1, GR2, GR3, GR4, GR5, GR6, GR7,
+ * - GR8, GR9, GR10, GR11, GR12, GR13, GR14, GR15,
+ * - GR16, GR17, GR18, GR19, GR20, GR21, GR22, GR23,
+ * - GR24, GR25, GR26, GR27, GR28, GR29, GR30, GR31,
+ * - GR32, GR33, GR34, GR35, GR36, GR37, GR38, GR39,
+ * - GR40, GR41, GR42, GR43, GR44, GR45, GR46, GR47,
+ * - GR48, GR49, GR50, GR51, GR52, GR53, GR54, GR55,
+ * - GR56, GR57, GR58, GR59, GR60, GR61, GR62, GR63,
+ * - FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
+ * - FP8, FP9, FP10, FP11, FP12, FP13, FP14, FP15,
+ * - FP16, FP17, FP18, FP19, FP20, FP21, FP22, FP23,
+ * - FP24, FP25, FP26, FP27, FP28, FP29, FP30, FP31,
+ * - FP32, FP33, FP34, FP35, FP36, FP37, FP38, FP39,
+ * - FP40, FP41, FP42, FP43, FP44, FP45, FP46, FP47,
+ * - FP48, FP49, FP50, FP51, FP52, FP53, FP54, FP55,
+ * - FP56, FP57, FP58, FP59, FP60, FP61, FP62, FP63,
+ * - PC, PSR, CCR, CCCR,
+ * - _X132, _X133, _X134
+ * - TBR, BRR, DBAR0, DBAR1, DBAR2, DBAR3,
+ * - _X141, _X142, _X143, _X144,
+ * - LR, LCR
+ */
+ case 'g':
+ zero = 0;
+ ptr = output_buffer;
+
+ /* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
+ ptr = mem2hex(&zero, ptr, 4, 0);
+
+ for (loop = 1; loop <= 27; loop++)
+ ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);
+ temp = (unsigned long) __frame;
+ ptr = mem2hex(&temp, ptr, 4, 0);
+ ptr = mem2hex(&__debug_user_context->i.gr[29], ptr, 4, 0);
+ ptr = mem2hex(&__debug_user_context->i.gr[30], ptr, 4, 0);
+#ifdef CONFIG_MMU
+ ptr = mem2hex(&__debug_user_context->i.gr[31], ptr, 4, 0);
+#else
+ temp = (unsigned long) __debug_frame;
+ ptr = mem2hex(&temp, ptr, 4, 0);
+#endif
+
+ for (loop = 32; loop <= 63; loop++)
+ ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);
+
+ /* deal with FR0-FR63 */
+ for (loop = 0; loop <= 63; loop++)
+ ptr = mem2hex(&__debug_user_context->f.fr[loop], ptr, 4, 0);
+
+ /* deal with special registers */
+ ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->psr, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->ccr, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->cccr, ptr, 4, 0);
+ ptr = mem2hex(&zero, ptr, 4, 0);
+ ptr = mem2hex(&zero, ptr, 4, 0);
+ ptr = mem2hex(&zero, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->tbr, ptr, 4, 0);
+ ptr = mem2hex(&__debug_status.brr , ptr, 4, 0);
+
+ asm volatile("movsg dbar0,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg dbar1,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg dbar2,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg dbar3,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+
+ asm volatile("movsg scr0,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg scr1,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg scr2,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+ asm volatile("movsg scr3,%0" : "=r"(dbar));
+ ptr = mem2hex(&dbar, ptr, 4, 0);
+
+ ptr = mem2hex(&__debug_frame->lr, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->lcr, ptr, 4, 0);
+
+ ptr = mem2hex(&__debug_frame->iacc0, ptr, 8, 0);
+
+ ptr = mem2hex(&__debug_user_context->f.fsr[0], ptr, 4, 0);
+
+ for (loop = 0; loop <= 7; loop++)
+ ptr = mem2hex(&__debug_user_context->f.acc[loop], ptr, 4, 0);
+
+ ptr = mem2hex(&__debug_user_context->f.accg, ptr, 8, 0);
+
+ for (loop = 0; loop <= 1; loop++)
+ ptr = mem2hex(&__debug_user_context->f.msr[loop], ptr, 4, 0);
+
+ ptr = mem2hex(&__debug_frame->gner0, ptr, 4, 0);
+ ptr = mem2hex(&__debug_frame->gner1, ptr, 4, 0);
+
+ ptr = mem2hex(&__debug_user_context->f.fner[0], ptr, 4, 0);
+ ptr = mem2hex(&__debug_user_context->f.fner[1], ptr, 4, 0);
+
+ break;
+
+ /* set the values of the CPU registers */
+ case 'G':
+ ptr = &input_buffer[1];
+
+ /* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
+ ptr = hex2mem(ptr, &temp, 4);
+
+ for (loop = 1; loop <= 27; loop++)
+ ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);
+
+ ptr = hex2mem(ptr, &temp, 4);
+ __frame = (struct pt_regs *) temp;
+ ptr = hex2mem(ptr, &__debug_frame->gr29, 4);
+ ptr = hex2mem(ptr, &__debug_frame->gr30, 4);
+#ifdef CONFIG_MMU
+ ptr = hex2mem(ptr, &__debug_frame->gr31, 4);
+#else
+ ptr = hex2mem(ptr, &temp, 4);
+#endif
+
+ for (loop = 32; loop <= 63; loop++)
+ ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);
+
+ /* deal with FR0-FR63 */
+ for (loop = 0; loop <= 63; loop++)
+				ptr = hex2mem(ptr, &__debug_user_context->f.fr[loop], 4);
+
+ /* deal with special registers */
+ ptr = hex2mem(ptr, &__debug_frame->pc, 4);
+ ptr = hex2mem(ptr, &__debug_frame->psr, 4);
+ ptr = hex2mem(ptr, &__debug_frame->ccr, 4);
+ ptr = hex2mem(ptr, &__debug_frame->cccr,4);
+
+ for (loop = 132; loop <= 140; loop++)
+ ptr = hex2mem(ptr, &temp, 4);
+
+ ptr = hex2mem(ptr, &temp, 4);
+ asm volatile("movgs %0,scr0" :: "r"(temp));
+ ptr = hex2mem(ptr, &temp, 4);
+ asm volatile("movgs %0,scr1" :: "r"(temp));
+ ptr = hex2mem(ptr, &temp, 4);
+ asm volatile("movgs %0,scr2" :: "r"(temp));
+ ptr = hex2mem(ptr, &temp, 4);
+ asm volatile("movgs %0,scr3" :: "r"(temp));
+
+ ptr = hex2mem(ptr, &__debug_frame->lr, 4);
+ ptr = hex2mem(ptr, &__debug_frame->lcr, 4);
+
+ ptr = hex2mem(ptr, &__debug_frame->iacc0, 8);
+
+ ptr = hex2mem(ptr, &__debug_user_context->f.fsr[0], 4);
+
+ for (loop = 0; loop <= 7; loop++)
+ ptr = hex2mem(ptr, &__debug_user_context->f.acc[loop], 4);
+
+ ptr = hex2mem(ptr, &__debug_user_context->f.accg, 8);
+
+ for (loop = 0; loop <= 1; loop++)
+ ptr = hex2mem(ptr, &__debug_user_context->f.msr[loop], 4);
+
+ ptr = hex2mem(ptr, &__debug_frame->gner0, 4);
+ ptr = hex2mem(ptr, &__debug_frame->gner1, 4);
+
+ ptr = hex2mem(ptr, &__debug_user_context->f.fner[0], 4);
+ ptr = hex2mem(ptr, &__debug_user_context->f.fner[1], 4);
+
+ gdbstub_strcpy(output_buffer,"OK");
+ break;
+
+ /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
+ case 'm':
+ ptr = &input_buffer[1];
+
+ if (hexToInt(&ptr, &addr) &&
+ *ptr++ == ',' &&
+ hexToInt(&ptr, &length)
+ ) {
+ if (mem2hex((char *)addr, output_buffer, length, 1))
+ break;
+ gdbstub_strcpy (output_buffer, "E03");
+ }
+ else {
+ gdbstub_strcpy(output_buffer,"E01");
+ }
+ break;
+
+		/* MAA..AA,LLLL: Write LLLL bytes at address AA..AA, return OK */
+ case 'M':
+ ptr = &input_buffer[1];
+
+ if (hexToInt(&ptr, &addr) &&
+ *ptr++ == ',' &&
+ hexToInt(&ptr, &length) &&
+ *ptr++ == ':'
+ ) {
+ if (hex2mem(ptr, (char *)addr, length)) {
+ gdbstub_strcpy(output_buffer, "OK");
+ }
+ else {
+ gdbstub_strcpy(output_buffer, "E03");
+ }
+ }
+ else
+ gdbstub_strcpy(output_buffer, "E02");
+
+ flush_cache = 1;
+ break;
+
+ /* pNN: Read value of reg N and return it */
+ case 'p':
+ /* return no value, indicating that we don't support
+ * this command and that gdb should use 'g' instead */
+ break;
+
+		/* PNN=RRRRRRRR: Write value R to reg N, return OK */
+ case 'P':
+ ptr = &input_buffer[1];
+
+ if (!hexToInt(&ptr, &addr) ||
+ *ptr++ != '=' ||
+ !hexToInt(&ptr, &temp)
+ ) {
+ gdbstub_strcpy(output_buffer, "E01");
+ break;
+ }
+
+ temp2 = 1;
+ switch (addr) {
+ case GDB_REG_GR(0):
+ break;
+ case GDB_REG_GR(1) ... GDB_REG_GR(63):
+ __debug_user_context->i.gr[addr - GDB_REG_GR(0)] = temp;
+ break;
+ case GDB_REG_FR(0) ... GDB_REG_FR(63):
+ __debug_user_context->f.fr[addr - GDB_REG_FR(0)] = temp;
+ break;
+ case GDB_REG_PC:
+ __debug_user_context->i.pc = temp;
+ break;
+ case GDB_REG_PSR:
+ __debug_user_context->i.psr = temp;
+ break;
+ case GDB_REG_CCR:
+ __debug_user_context->i.ccr = temp;
+ break;
+ case GDB_REG_CCCR:
+ __debug_user_context->i.cccr = temp;
+ break;
+ case GDB_REG_BRR:
+ __debug_status.brr = temp;
+ break;
+ case GDB_REG_LR:
+ __debug_user_context->i.lr = temp;
+ break;
+ case GDB_REG_LCR:
+ __debug_user_context->i.lcr = temp;
+ break;
+ case GDB_REG_FSR0:
+ __debug_user_context->f.fsr[0] = temp;
+ break;
+ case GDB_REG_ACC(0) ... GDB_REG_ACC(7):
+ __debug_user_context->f.acc[addr - GDB_REG_ACC(0)] = temp;
+ break;
+ case GDB_REG_ACCG(0):
+ *(uint32_t *) &__debug_user_context->f.accg[0] = temp;
+ break;
+ case GDB_REG_ACCG(4):
+ *(uint32_t *) &__debug_user_context->f.accg[4] = temp;
+ break;
+ case GDB_REG_MSR(0) ... GDB_REG_MSR(1):
+ __debug_user_context->f.msr[addr - GDB_REG_MSR(0)] = temp;
+ break;
+ case GDB_REG_GNER(0) ... GDB_REG_GNER(1):
+ __debug_user_context->i.gner[addr - GDB_REG_GNER(0)] = temp;
+ break;
+ case GDB_REG_FNER(0) ... GDB_REG_FNER(1):
+ __debug_user_context->f.fner[addr - GDB_REG_FNER(0)] = temp;
+ break;
+ default:
+ temp2 = 0;
+ break;
+ }
+
+ if (temp2) {
+ gdbstub_strcpy(output_buffer, "OK");
+ }
+ else {
+ gdbstub_strcpy(output_buffer, "E02");
+ }
+ break;
+
+ /* cAA..AA Continue at address AA..AA(optional) */
+ case 'c':
+ /* try to read optional parameter, pc unchanged if no parm */
+ ptr = &input_buffer[1];
+ if (hexToInt(&ptr, &addr))
+ __debug_frame->pc = addr;
+ goto done;
+
+ /* kill the program */
+ case 'k' :
+ goto done; /* just continue */
+
+ /* detach */
+ case 'D':
+ gdbstub_strcpy(output_buffer, "OK");
+ break;
+
+ /* reset the whole machine (FIXME: system dependent) */
+ case 'r':
+ break;
+
+
+ /* step to next instruction */
+ case 's':
+ __debug_regs->dcr |= DCR_SE;
+ __debug_status.dcr |= DCR_SE;
+ goto done;
+
+ /* extended command */
+ case 'v':
+ if (strcmp(input_buffer, "vCont?") == 0) {
+ output_buffer[0] = 0;
+ break;
+ }
+ goto unsupported_cmd;
+
+ /* set baud rate (bBB) */
+ case 'b':
+ ptr = &input_buffer[1];
+ if (!hexToInt(&ptr, &temp)) {
+ gdbstub_strcpy(output_buffer,"B01");
+ break;
+ }
+
+ if (temp) {
+ /* ack before changing speed */
+ gdbstub_send_packet("OK");
+ gdbstub_set_baud(temp);
+ }
+ break;
+
+ /* set breakpoint */
+ case 'Z':
+ ptr = &input_buffer[1];
+
+ if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
+ !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
+ !hexToInt(&ptr,&length)
+ ) {
+ gdbstub_strcpy(output_buffer,"E01");
+ break;
+ }
+
+ if (temp >= 5) {
+ gdbstub_strcpy(output_buffer,"E03");
+ break;
+ }
+
+ if (gdbstub_set_breakpoint(temp, addr, length) < 0) {
+ gdbstub_strcpy(output_buffer,"E03");
+ break;
+ }
+
+ if (temp == 0)
+ flush_cache = 1; /* soft bkpt by modified memory */
+
+ gdbstub_strcpy(output_buffer,"OK");
+ break;
+
+ /* clear breakpoint */
+ case 'z':
+ ptr = &input_buffer[1];
+
+ if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
+ !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
+ !hexToInt(&ptr,&length)
+ ) {
+ gdbstub_strcpy(output_buffer,"E01");
+ break;
+ }
+
+ if (temp >= 5) {
+ gdbstub_strcpy(output_buffer,"E03");
+ break;
+ }
+
+ if (gdbstub_clear_breakpoint(temp, addr, length) < 0) {
+ gdbstub_strcpy(output_buffer,"E03");
+ break;
+ }
+
+ if (temp == 0)
+ flush_cache = 1; /* soft bkpt by modified memory */
+
+ gdbstub_strcpy(output_buffer,"OK");
+ break;
+
+ /* Thread-setting packet */
+ case 'H':
+ gdbstub_strcpy(output_buffer, "OK");
+ break;
+
+ case 'q':
+ gdbstub_handle_query();
+ break;
+
+ default:
+ unsupported_cmd:
+ gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
+ gdbstub_strcpy(output_buffer,"E01");
+ break;
+ }
+
+ /* reply to the request */
+ LEDS(0x5009);
+ gdbstub_send_packet(output_buffer);
+ }
+
+ done:
+ restore_user_regs(&__debug_frame0->uc);
+
+ //gdbstub_dump_debugregs();
+ //gdbstub_printk("<-- gdbstub() %08x\n", __debug_frame->pc);
+
+ /* need to flush the instruction cache before resuming, as we may have
+ * deposited a breakpoint, and the icache probably has no way of
+ * knowing that a data ref to some location may have changed something
+ * that is in the instruction cache. NB: We flush both caches, just to
+ * be sure...
+ */
+
+ /* note: flushing the icache will clobber EAR0 on the FR451 */
+ if (flush_cache)
+ gdbstub_purge_cache();
+
+ LEDS(0x5666);
+
+} /* end gdbstub() */
+
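The command loop above parses requests that arrive as plain text with all numbers in hex: "m<addr>,<length>" for a memory read, "M<addr>,<length>:<data>" for a write, and "Z<type>,<addr>,<length>" to set a breakpoint. A hedged sketch of how a remote client might format two of them; the helper names are invented, and only the string formats are taken from the parsing code above.

/* Illustrative sketch of the request strings parsed by the 'm' and 'Z'
 * cases above.  Helper names are made up; only the formats matter. */
#include <stdio.h>

static void format_read_mem(char *pkt, size_t n, unsigned long addr,
			    unsigned long len)
{
	snprintf(pkt, n, "m%lx,%lx", addr, len);	/* e.g. "mc0100000,40" */
}

static void format_set_swbreak(char *pkt, size_t n, unsigned long addr)
{
	snprintf(pkt, n, "Z0,%lx,4", addr);	/* type 0 = software breakpoint */
}

int main(void)
{
	char pkt[32];

	format_read_mem(pkt, sizeof(pkt), 0xc0100000UL, 0x40);
	printf("%s\n", pkt);
	format_set_swbreak(pkt, sizeof(pkt), 0xc0100000UL);
	printf("%s\n", pkt);
	return 0;
}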
+/*****************************************************************************/
+/*
+ * initialise the GDB stub
+ */
+void __init gdbstub_init(void)
+{
+#ifdef CONFIG_GDBSTUB_IMMEDIATE
+ unsigned char ch;
+ int ret;
+#endif
+
+ gdbstub_printk("%s", gdbstub_banner);
+
+ gdbstub_io_init();
+
+ /* try to talk to GDB (or anyone insane enough to want to type GDB protocol by hand) */
+ gdbstub_proto("### GDB Tx ACK\n");
+ gdbstub_tx_char('+'); /* 'hello world' */
+
+#ifdef CONFIG_GDBSTUB_IMMEDIATE
+ gdbstub_printk("GDB Stub waiting for packet\n");
+
+ /*
+ * In case GDB is started before us, ack any packets
+ * (presumably "$?#xx") sitting there.
+ */
+ do { gdbstub_rx_char(&ch, 0); } while (ch != '$');
+ do { gdbstub_rx_char(&ch, 0); } while (ch != '#');
+ do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat first csum byte */
+ do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */
+
+ gdbstub_proto("### GDB Tx NAK\n");
+ gdbstub_tx_char('-'); /* nak it */
+
+#else
+ gdbstub_printk("GDB Stub set\n");
+#endif
+
+#if 0
+ /* send banner */
+ ptr = output_buffer;
+ *ptr++ = 'O';
+ ptr = mem2hex(gdbstub_banner, ptr, sizeof(gdbstub_banner) - 1, 0);
+ gdbstub_send_packet(output_buffer);
+#endif
+#if defined(CONFIG_GDB_CONSOLE) && defined(CONFIG_GDBSTUB_IMMEDIATE)
+ register_console(&gdbstub_console);
+#endif
+
+} /* end gdbstub_init() */
+
+/*****************************************************************************/
+/*
+ * register the console at a more appropriate time
+ */
+#if defined (CONFIG_GDB_CONSOLE) && !defined(CONFIG_GDBSTUB_IMMEDIATE)
+static int __init gdbstub_postinit(void)
+{
+ printk("registering console\n");
+ register_console(&gdbstub_console);
+ return 0;
+} /* end gdbstub_postinit() */
+
+__initcall(gdbstub_postinit);
+#endif
+
+/*****************************************************************************/
+/*
+ * send an exit message to GDB
+ */
+void gdbstub_exit(int status)
+{
+ unsigned char checksum;
+ int count;
+ unsigned char ch;
+
+ sprintf(output_buffer,"W%02x",status&0xff);
+
+ gdbstub_tx_char('$');
+ checksum = 0;
+ count = 0;
+
+ while ((ch = output_buffer[count]) != 0) {
+ gdbstub_tx_char(ch);
+ checksum += ch;
+ count += 1;
+ }
+
+ gdbstub_tx_char('#');
+ gdbstub_tx_char(hex_asc_hi(checksum));
+ gdbstub_tx_char(hex_asc_lo(checksum));
+
+ /* make sure the output is flushed, or else RedBoot might clobber it */
+ gdbstub_tx_char('-');
+ gdbstub_tx_flush();
+
+} /* end gdbstub_exit() */
+
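The loop above applies the remote-protocol framing by hand: the payload is wrapped as "$<payload>#<checksum>", where the checksum is the sum of the payload bytes modulo 256 sent as two hex digits. A stand-alone sketch of the same calculation; frame_packet() is made up for the example. For an exit status of 0 the payload is "W00" and the framed packet comes out as "$W00#b7".

/* Stand-alone sketch of the "$<payload>#<csum>" framing done by hand in
 * gdbstub_exit() above; frame_packet() is invented for this example. */
#include <stdio.h>

static void frame_packet(const char *payload, char *out, size_t outsz)
{
	unsigned char csum = 0;
	size_t i;

	for (i = 0; payload[i]; i++)
		csum += (unsigned char)payload[i];	/* sum mod 256 */

	snprintf(out, outsz, "$%s#%02x", payload, (unsigned int)csum);
}

int main(void)
{
	char pkt[32], payload[8];
	int status = 0;

	snprintf(payload, sizeof(payload), "W%02x", (unsigned int)(status & 0xff));
	frame_packet(payload, pkt, sizeof(pkt));
	printf("%s\n", pkt);	/* prints "$W00#b7" */
	return 0;
}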
+/*****************************************************************************/
+/*
+ * GDB wants to call malloc() and free() to allocate memory for calling kernel
+ * functions directly from its command line
+ */
+static void *malloc(size_t size) __maybe_unused;
+static void *malloc(size_t size)
+{
+ return kmalloc(size, GFP_ATOMIC);
+}
+
+static void free(void *p) __maybe_unused;
+static void free(void *p)
+{
+ kfree(p);
+}
+
+static uint32_t ___get_HSR0(void) __maybe_unused;
+static uint32_t ___get_HSR0(void)
+{
+ return __get_HSR(0);
+}
+
+static uint32_t ___set_HSR0(uint32_t x) __maybe_unused;
+static uint32_t ___set_HSR0(uint32_t x)
+{
+ __set_HSR(0, x);
+ return __get_HSR(0);
+}
diff --git a/arch/frv/kernel/head-mmu-fr451.S b/arch/frv/kernel/head-mmu-fr451.S
new file mode 100644
index 00000000000..98f87d586e5
--- /dev/null
+++ b/arch/frv/kernel/head-mmu-fr451.S
@@ -0,0 +1,374 @@
+/* head-mmu-fr451.S: FR451 mmu-linux specific bits of initialisation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/mem-layout.h>
+#include <asm/spr-regs.h>
+#include <asm/mb86943a.h>
+#include "head.inc"
+
+
+#define __400_DBR0 0xfe000e00
+#define __400_DBR1 0xfe000e08
+#define __400_DBR2 0xfe000e10
+#define __400_DBR3 0xfe000e18
+#define __400_DAM0 0xfe000f00
+#define __400_DAM1 0xfe000f08
+#define __400_DAM2 0xfe000f10
+#define __400_DAM3 0xfe000f18
+#define __400_LGCR 0xfe000010
+#define __400_LCR 0xfe000100
+#define __400_LSBR 0xfe000c00
+
+ __INIT
+ .balign 4
+
+###############################################################################
+#
+# describe the position and layout of the SDRAM controller registers
+#
+# ENTRY: EXIT:
+# GR5 - cacheline size
+# GR11 - displacement of 2nd SDRAM addr reg from GR14
+# GR12 - displacement of 3rd SDRAM addr reg from GR14
+# GR13 - displacement of 4th SDRAM addr reg from GR14
+# GR14 - address of 1st SDRAM addr reg
+# GR15 - amount to shift address by to match SDRAM addr reg
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+# CC0 - T if DBR0 is present
+# CC1 - T if DBR1 is present
+# CC2 - T if DBR2 is present
+# CC3 - T if DBR3 is present
+#
+###############################################################################
+ .globl __head_fr451_describe_sdram
+__head_fr451_describe_sdram:
+ sethi.p %hi(__400_DBR0),gr14
+ setlo %lo(__400_DBR0),gr14
+ setlos.p #__400_DBR1-__400_DBR0,gr11
+ setlos #__400_DBR2-__400_DBR0,gr12
+ setlos.p #__400_DBR3-__400_DBR0,gr13
+ setlos #32,gr5 ; cacheline size
+ setlos.p #0,gr15 ; amount to shift addr reg by
+ setlos #0x00ff,gr4
+ movgs gr4,cccr ; extant DARS/DAMK regs
+ bralr
+
+###############################################################################
+#
+# rearrange the bus controller registers
+#
+# ENTRY: EXIT:
+# GR26 &__head_reference [saved]
+# GR30 LED address revised LED address
+#
+###############################################################################
+ .globl __head_fr451_set_busctl
+__head_fr451_set_busctl:
+ sethi.p %hi(__400_LGCR),gr4
+ setlo %lo(__400_LGCR),gr4
+ sethi.p %hi(__400_LSBR),gr10
+ setlo %lo(__400_LSBR),gr10
+ sethi.p %hi(__400_LCR),gr11
+ setlo %lo(__400_LCR),gr11
+
+ # set the bus controller
+ ldi @(gr4,#0),gr5
+ ori gr5,#0xff,gr5 ; make sure all chip-selects are enabled
+ sti gr5,@(gr4,#0)
+
+ sethi.p %hi(__region_CS1),gr4
+ setlo %lo(__region_CS1),gr4
+ sethi.p %hi(__region_CS1_M),gr5
+ setlo %lo(__region_CS1_M),gr5
+ sethi.p %hi(__region_CS1_C),gr6
+ setlo %lo(__region_CS1_C),gr6
+ sti gr4,@(gr10,#1*0x08)
+ sti gr5,@(gr10,#1*0x08+0x100)
+ sti gr6,@(gr11,#1*0x08)
+ sethi.p %hi(__region_CS2),gr4
+ setlo %lo(__region_CS2),gr4
+ sethi.p %hi(__region_CS2_M),gr5
+ setlo %lo(__region_CS2_M),gr5
+ sethi.p %hi(__region_CS2_C),gr6
+ setlo %lo(__region_CS2_C),gr6
+ sti gr4,@(gr10,#2*0x08)
+ sti gr5,@(gr10,#2*0x08+0x100)
+ sti gr6,@(gr11,#2*0x08)
+ sethi.p %hi(__region_CS3),gr4
+ setlo %lo(__region_CS3),gr4
+ sethi.p %hi(__region_CS3_M),gr5
+ setlo %lo(__region_CS3_M),gr5
+ sethi.p %hi(__region_CS3_C),gr6
+ setlo %lo(__region_CS3_C),gr6
+ sti gr4,@(gr10,#3*0x08)
+ sti gr5,@(gr10,#3*0x08+0x100)
+ sti gr6,@(gr11,#3*0x08)
+ sethi.p %hi(__region_CS4),gr4
+ setlo %lo(__region_CS4),gr4
+ sethi.p %hi(__region_CS4_M),gr5
+ setlo %lo(__region_CS4_M),gr5
+ sethi.p %hi(__region_CS4_C),gr6
+ setlo %lo(__region_CS4_C),gr6
+ sti gr4,@(gr10,#4*0x08)
+ sti gr5,@(gr10,#4*0x08+0x100)
+ sti gr6,@(gr11,#4*0x08)
+ sethi.p %hi(__region_CS5),gr4
+ setlo %lo(__region_CS5),gr4
+ sethi.p %hi(__region_CS5_M),gr5
+ setlo %lo(__region_CS5_M),gr5
+ sethi.p %hi(__region_CS5_C),gr6
+ setlo %lo(__region_CS5_C),gr6
+ sti gr4,@(gr10,#5*0x08)
+ sti gr5,@(gr10,#5*0x08+0x100)
+ sti gr6,@(gr11,#5*0x08)
+ sethi.p %hi(__region_CS6),gr4
+ setlo %lo(__region_CS6),gr4
+ sethi.p %hi(__region_CS6_M),gr5
+ setlo %lo(__region_CS6_M),gr5
+ sethi.p %hi(__region_CS6_C),gr6
+ setlo %lo(__region_CS6_C),gr6
+ sti gr4,@(gr10,#6*0x08)
+ sti gr5,@(gr10,#6*0x08+0x100)
+ sti gr6,@(gr11,#6*0x08)
+ sethi.p %hi(__region_CS7),gr4
+ setlo %lo(__region_CS7),gr4
+ sethi.p %hi(__region_CS7_M),gr5
+ setlo %lo(__region_CS7_M),gr5
+ sethi.p %hi(__region_CS7_C),gr6
+ setlo %lo(__region_CS7_C),gr6
+ sti gr4,@(gr10,#7*0x08)
+ sti gr5,@(gr10,#7*0x08+0x100)
+ sti gr6,@(gr11,#7*0x08)
+ membar
+ bar
+
+ # adjust LED bank address
+#ifdef CONFIG_MB93091_VDK
+ sethi.p %hi(__region_CS2 + 0x01200004),gr30
+ setlo %lo(__region_CS2 + 0x01200004),gr30
+#endif
+ bralr
+
+###############################################################################
+#
+# determine the total SDRAM size
+#
+# ENTRY: EXIT:
+# GR25 - SDRAM size
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+###############################################################################
+ .globl __head_fr451_survey_sdram
+__head_fr451_survey_sdram:
+ sethi.p %hi(__400_DAM0),gr11
+ setlo %lo(__400_DAM0),gr11
+ sethi.p %hi(__400_DBR0),gr12
+ setlo %lo(__400_DBR0),gr12
+
+ sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
+ setlo %lo(0xfe000000),gr17
+ setlos #0,gr25
+
+ ldi @(gr12,#0x00),gr4 ; DAR0
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS0
+ ldi @(gr11,#0x00),gr6 ; DAM0: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS0:
+
+ ldi @(gr12,#0x08),gr4 ; DAR1
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS1
+ ldi @(gr11,#0x08),gr6 ; DAM1: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS1:
+
+ ldi @(gr12,#0x10),gr4 ; DAR2
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS2
+ ldi @(gr11,#0x10),gr6 ; DAM2: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS2:
+
+ ldi @(gr12,#0x18),gr4 ; DAR3
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS3
+ ldi @(gr11,#0x18),gr6 ; DAM3: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS3:
+ bralr
+
+###############################################################################
+#
+# set the protection map with the I/DAMPR registers
+#
+# ENTRY: EXIT:
+# GR25 SDRAM size [saved]
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+#
+# Using this map:
+# REGISTERS ADDRESS RANGE VIEW
+# =============== ====================== ===============================
+# IAMPR0/DAMPR0 0xC0000000-0xCFFFFFFF Cached kernel RAM Window
+# DAMPR11 0xE0000000-0xFFFFFFFF Uncached I/O
+#
+###############################################################################
+ .globl __head_fr451_set_protection
+__head_fr451_set_protection:
+ movsg lr,gr27
+
+ # set the I/O region protection registers for FR451 in MMU mode
+#define PGPROT_IO xAMPRx_L|xAMPRx_M|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V
+
+ sethi.p %hi(__region_IO),gr5
+ setlo %lo(__region_IO),gr5
+ setlos #PGPROT_IO|xAMPRx_SS_512Mb,gr4
+ or gr4,gr5,gr4
+ movgs gr5,damlr11 ; General I/O tile
+ movgs gr4,dampr11
+
+ # need to open a window onto at least part of the RAM for the kernel's use
+ sethi.p %hi(__sdram_base),gr8
+ setlo %lo(__sdram_base),gr8 ; physical address
+ sethi.p %hi(__page_offset),gr9
+ setlo %lo(__page_offset),gr9 ; virtual address
+
+ setlos #xAMPRx_L|xAMPRx_M|xAMPRx_SS_256Mb|xAMPRx_S_KERNEL|xAMPRx_V,gr11
+ or gr8,gr11,gr8
+
+ movgs gr9,iamlr0 ; mapped from real address 0
+ movgs gr8,iampr0 ; cached kernel memory at 0xC0000000
+ movgs gr9,damlr0
+ movgs gr8,dampr0
+
+ # set a temporary mapping for the kernel running at address 0 until we've turned on the MMU
+ sethi.p %hi(__sdram_base),gr9
+ setlo %lo(__sdram_base),gr9 ; virtual address
+
+ and.p gr4,gr11,gr4
+ and gr5,gr11,gr5
+ or.p gr4,gr11,gr4
+ or gr5,gr11,gr5
+
+ movgs gr9,iamlr1 ; mapped from real address 0
+ movgs gr8,iampr1 ; cached kernel memory at 0x00000000
+ movgs gr9,damlr1
+ movgs gr8,dampr1
+
+ # we use DAMR2-10 for kmap_atomic(), cache flush and TLB management
+ # since the DAMLR regs are not going to change, we can set them now
+ # also set up IAMLR2 to the same as DAMLR5
+ sethi.p %hi(KMAP_ATOMIC_PRIMARY_FRAME),gr4
+ setlo %lo(KMAP_ATOMIC_PRIMARY_FRAME),gr4
+ sethi.p %hi(PAGE_SIZE),gr5
+ setlo %lo(PAGE_SIZE),gr5
+
+ movgs gr4,damlr2
+ movgs gr4,iamlr2
+ add gr4,gr5,gr4
+ movgs gr4,damlr3
+ add gr4,gr5,gr4
+ movgs gr4,damlr4
+ add gr4,gr5,gr4
+ movgs gr4,damlr5
+ add gr4,gr5,gr4
+ movgs gr4,damlr6
+ add gr4,gr5,gr4
+ movgs gr4,damlr7
+ add gr4,gr5,gr4
+ movgs gr4,damlr8
+ add gr4,gr5,gr4
+ movgs gr4,damlr9
+ add gr4,gr5,gr4
+ movgs gr4,damlr10
+
+ movgs gr0,dampr2
+ movgs gr0,dampr4
+ movgs gr0,dampr5
+ movgs gr0,dampr6
+ movgs gr0,dampr7
+ movgs gr0,dampr8
+ movgs gr0,dampr9
+ movgs gr0,dampr10
+
+ movgs gr0,iamlr3
+ movgs gr0,iamlr4
+ movgs gr0,iamlr5
+ movgs gr0,iamlr6
+ movgs gr0,iamlr7
+
+ movgs gr0,iampr2
+ movgs gr0,iampr3
+ movgs gr0,iampr4
+ movgs gr0,iampr5
+ movgs gr0,iampr6
+ movgs gr0,iampr7
+
+ # start in TLB context 0 with the swapper's page tables
+ movgs gr0,cxnr
+
+ sethi.p %hi(swapper_pg_dir),gr4
+ setlo %lo(swapper_pg_dir),gr4
+ sethi.p %hi(__page_offset),gr5
+ setlo %lo(__page_offset),gr5
+ sub gr4,gr5,gr4
+ movgs gr4,ttbr
+ setlos #xAMPRx_L|xAMPRx_M|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr5
+ or gr4,gr5,gr4
+ movgs gr4,dampr3
+
+ # the FR451 also has an extra trap base register
+ movsg tbr,gr4
+ movgs gr4,btbr
+
+ LEDS 0x3300
+ jmpl @(gr27,gr0)
+
+###############################################################################
+#
+# finish setting up the protection registers
+#
+###############################################################################
+ .globl __head_fr451_finalise_protection
+__head_fr451_finalise_protection:
+ # turn on the timers as appropriate
+ movgs gr0,timerh
+ movgs gr0,timerl
+ movgs gr0,timerd
+ movsg hsr0,gr4
+ sethi.p %hi(HSR0_ETMI),gr5
+ setlo %lo(HSR0_ETMI),gr5
+ or gr4,gr5,gr4
+ movgs gr4,hsr0
+
+ # clear the TLB entry cache
+ movgs gr0,iamlr1
+ movgs gr0,iampr1
+ movgs gr0,damlr1
+ movgs gr0,dampr1
+
+ # clear the PGE cache
+ sethi.p %hi(__flush_tlb_all),gr4
+ setlo %lo(__flush_tlb_all),gr4
+ jmpl @(gr4,gr0)
diff --git a/arch/frv/kernel/head-uc-fr401.S b/arch/frv/kernel/head-uc-fr401.S
new file mode 100644
index 00000000000..438643cfa38
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr401.S
@@ -0,0 +1,311 @@
+/* head-uc-fr401.S: FR401/3/5 uc-linux specific bits of initialisation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spr-regs.h>
+#include <asm/mb86943a.h>
+#include "head.inc"
+
+
+#define __400_DBR0 0xfe000e00
+#define __400_DBR1 0xfe000e08
+#define __400_DBR2 0xfe000e10 /* not on FR401 */
+#define __400_DBR3 0xfe000e18 /* not on FR401 */
+#define __400_DAM0 0xfe000f00
+#define __400_DAM1 0xfe000f08
+#define __400_DAM2 0xfe000f10 /* not on FR401 */
+#define __400_DAM3 0xfe000f18 /* not on FR401 */
+#define __400_LGCR 0xfe000010
+#define __400_LCR 0xfe000100
+#define __400_LSBR 0xfe000c00
+
+ __INIT
+ .balign 4
+
+###############################################################################
+#
+# describe the position and layout of the SDRAM controller registers
+#
+# ENTRY: EXIT:
+# GR5 - cacheline size
+# GR11 - displacement of 2nd SDRAM addr reg from GR14
+# GR12 - displacement of 3rd SDRAM addr reg from GR14
+# GR13 - displacement of 4th SDRAM addr reg from GR14
+# GR14 - address of 1st SDRAM addr reg
+# GR15 - amount to shift address by to match SDRAM addr reg
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+# CC0 - T if DBR0 is present
+# CC1 - T if DBR1 is present
+# CC2 - T if DBR2 is present (not FR401/FR401A)
+# CC3 - T if DBR3 is present (not FR401/FR401A)
+#
+###############################################################################
+ .globl __head_fr401_describe_sdram
+__head_fr401_describe_sdram:
+ sethi.p %hi(__400_DBR0),gr14
+ setlo %lo(__400_DBR0),gr14
+ setlos.p #__400_DBR1-__400_DBR0,gr11
+ setlos #__400_DBR2-__400_DBR0,gr12
+ setlos.p #__400_DBR3-__400_DBR0,gr13
+ setlos #32,gr5 ; cacheline size
+ setlos.p #0,gr15 ; amount to shift addr reg by
+
+ # specify which DBR regs are present
+ setlos #0x00ff,gr4
+ movgs gr4,cccr
+ movsg psr,gr3 ; check for FR401/FR401A
+ srli gr3,#25,gr3
+ subicc gr3,#0x20>>1,gr0,icc0
+ bnelr icc0,#1
+ setlos #0x000f,gr4
+ movgs gr4,cccr
+ bralr
+
+###############################################################################
+#
+# rearrange the bus controller registers
+#
+# ENTRY: EXIT:
+# GR26 &__head_reference [saved]
+# GR30 LED address revised LED address
+#
+###############################################################################
+ .globl __head_fr401_set_busctl
+__head_fr401_set_busctl:
+ sethi.p %hi(__400_LGCR),gr4
+ setlo %lo(__400_LGCR),gr4
+ sethi.p %hi(__400_LSBR),gr10
+ setlo %lo(__400_LSBR),gr10
+ sethi.p %hi(__400_LCR),gr11
+ setlo %lo(__400_LCR),gr11
+
+ # set the bus controller
+ ldi @(gr4,#0),gr5
+ ori gr5,#0xff,gr5 ; make sure all chip-selects are enabled
+ sti gr5,@(gr4,#0)
+
+ sethi.p %hi(__region_CS1),gr4
+ setlo %lo(__region_CS1),gr4
+ sethi.p %hi(__region_CS1_M),gr5
+ setlo %lo(__region_CS1_M),gr5
+ sethi.p %hi(__region_CS1_C),gr6
+ setlo %lo(__region_CS1_C),gr6
+ sti gr4,@(gr10,#1*0x08)
+ sti gr5,@(gr10,#1*0x08+0x100)
+ sti gr6,@(gr11,#1*0x08)
+ sethi.p %hi(__region_CS2),gr4
+ setlo %lo(__region_CS2),gr4
+ sethi.p %hi(__region_CS2_M),gr5
+ setlo %lo(__region_CS2_M),gr5
+ sethi.p %hi(__region_CS2_C),gr6
+ setlo %lo(__region_CS2_C),gr6
+ sti gr4,@(gr10,#2*0x08)
+ sti gr5,@(gr10,#2*0x08+0x100)
+ sti gr6,@(gr11,#2*0x08)
+ sethi.p %hi(__region_CS3),gr4
+ setlo %lo(__region_CS3),gr4
+ sethi.p %hi(__region_CS3_M),gr5
+ setlo %lo(__region_CS3_M),gr5
+ sethi.p %hi(__region_CS3_C),gr6
+ setlo %lo(__region_CS3_C),gr6
+ sti gr4,@(gr10,#3*0x08)
+ sti gr5,@(gr10,#3*0x08+0x100)
+ sti gr6,@(gr11,#3*0x08)
+ sethi.p %hi(__region_CS4),gr4
+ setlo %lo(__region_CS4),gr4
+ sethi.p %hi(__region_CS4_M),gr5
+ setlo %lo(__region_CS4_M),gr5
+ sethi.p %hi(__region_CS4_C),gr6
+ setlo %lo(__region_CS4_C),gr6
+ sti gr4,@(gr10,#4*0x08)
+ sti gr5,@(gr10,#4*0x08+0x100)
+ sti gr6,@(gr11,#4*0x08)
+ sethi.p %hi(__region_CS5),gr4
+ setlo %lo(__region_CS5),gr4
+ sethi.p %hi(__region_CS5_M),gr5
+ setlo %lo(__region_CS5_M),gr5
+ sethi.p %hi(__region_CS5_C),gr6
+ setlo %lo(__region_CS5_C),gr6
+ sti gr4,@(gr10,#5*0x08)
+ sti gr5,@(gr10,#5*0x08+0x100)
+ sti gr6,@(gr11,#5*0x08)
+ sethi.p %hi(__region_CS6),gr4
+ setlo %lo(__region_CS6),gr4
+ sethi.p %hi(__region_CS6_M),gr5
+ setlo %lo(__region_CS6_M),gr5
+ sethi.p %hi(__region_CS6_C),gr6
+ setlo %lo(__region_CS6_C),gr6
+ sti gr4,@(gr10,#6*0x08)
+ sti gr5,@(gr10,#6*0x08+0x100)
+ sti gr6,@(gr11,#6*0x08)
+ sethi.p %hi(__region_CS7),gr4
+ setlo %lo(__region_CS7),gr4
+ sethi.p %hi(__region_CS7_M),gr5
+ setlo %lo(__region_CS7_M),gr5
+ sethi.p %hi(__region_CS7_C),gr6
+ setlo %lo(__region_CS7_C),gr6
+ sti gr4,@(gr10,#7*0x08)
+ sti gr5,@(gr10,#7*0x08+0x100)
+ sti gr6,@(gr11,#7*0x08)
+ membar
+ bar
+
+ # adjust LED bank address
+ sethi.p %hi(LED_ADDR - 0x20000000 +__region_CS2),gr30
+ setlo %lo(LED_ADDR - 0x20000000 +__region_CS2),gr30
+ bralr
+
+###############################################################################
+#
+# determine the total SDRAM size
+#
+# ENTRY: EXIT:
+# GR25 - SDRAM size
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+###############################################################################
+ .globl __head_fr401_survey_sdram
+__head_fr401_survey_sdram:
+ sethi.p %hi(__400_DAM0),gr11
+ setlo %lo(__400_DAM0),gr11
+ sethi.p %hi(__400_DBR0),gr12
+ setlo %lo(__400_DBR0),gr12
+
+ sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
+ setlo %lo(0xfe000000),gr17
+ setlos #0,gr25
+
+ ldi @(gr12,#0x00),gr4 ; DAR0
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS0
+ ldi @(gr11,#0x00),gr6 ; DAM0: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS0:
+
+ ldi @(gr12,#0x08),gr4 ; DAR1
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS1
+ ldi @(gr11,#0x08),gr6 ; DAM1: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS1:
+
+ # FR401/FR401A does not have DCS2/3
+ movsg psr,gr3
+ srli gr3,#25,gr3
+ subicc gr3,#0x20>>1,gr0,icc0
+ beq icc0,#0,__head_no_DCS3
+
+ ldi @(gr12,#0x10),gr4 ; DAR2
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS2
+ ldi @(gr11,#0x10),gr6 ; DAM2: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS2:
+
+ ldi @(gr12,#0x18),gr4 ; DAR3
+ subcc gr4,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS3
+ ldi @(gr11,#0x18),gr6 ; DAM3: bits 31:20 match addr 31:20
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS3:
+ bralr
+
+###############################################################################
+#
+# set the protection map with the I/DAMPR registers
+#
+# ENTRY: EXIT:
+# GR25 SDRAM size [saved]
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+###############################################################################
+ .globl __head_fr401_set_protection
+__head_fr401_set_protection:
+ movsg lr,gr27
+
+ # set the I/O region protection registers for FR401/3/5
+ sethi.p %hi(__region_IO),gr5
+ setlo %lo(__region_IO),gr5
+ ori gr5,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
+ movgs gr0,iampr7
+ movgs gr5,dampr7 ; General I/O tile
+
+ # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
+ # - start with the highest numbered registers
+ sethi.p %hi(__kernel_image_end),gr8
+ setlo %lo(__kernel_image_end),gr8
+ sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
+ setlo %lo(32768),gr4
+ add gr8,gr4,gr8
+ sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
+ setlo %lo(1024*2048-1),gr4
+ add.p gr8,gr4,gr8
+ not gr4,gr4
+ and gr8,gr4,gr8
+
+ sethi.p %hi(__page_offset),gr9
+ setlo %lo(__page_offset),gr9
+ add gr9,gr25,gr9
+
+ # GR8 = base of uncovered RAM
+ # GR9 = top of uncovered RAM
+
+#ifdef CONFIG_MB93093_PDK
+ sethi.p %hi(__region_CS2),gr4
+ setlo %lo(__region_CS2),gr4
+ ori gr4,#xAMPRx_SS_1Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr4
+ movgs gr4,dampr6
+ movgs gr0,iampr6
+#else
+ call __head_split_region
+ movgs gr4,iampr6
+ movgs gr5,dampr6
+#endif
+ call __head_split_region
+ movgs gr4,iampr5
+ movgs gr5,dampr5
+ call __head_split_region
+ movgs gr4,iampr4
+ movgs gr5,dampr4
+ call __head_split_region
+ movgs gr4,iampr3
+ movgs gr5,dampr3
+ call __head_split_region
+ movgs gr4,iampr2
+ movgs gr5,dampr2
+ call __head_split_region
+ movgs gr4,iampr1
+ movgs gr5,dampr1
+
+ # cover kernel core image with kernel-only segment
+ sethi.p %hi(__page_offset),gr8
+ setlo %lo(__page_offset),gr8
+ call __head_split_region
+
+#ifdef CONFIG_PROTECT_KERNEL
+ ori.p gr4,#xAMPRx_S_KERNEL,gr4
+ ori gr5,#xAMPRx_S_KERNEL,gr5
+#endif
+
+ movgs gr4,iampr0
+ movgs gr5,dampr0
+ jmpl @(gr27,gr0)
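The add/not/and sequence above ("round up to nearest 2MiB", also used by the FR451 and FR555 variants that follow) is the usual round-up-to-a-power-of-two idiom: add (alignment - 1), then mask with ~(alignment - 1). The same step in plain C, assuming the 2 MiB alignment used by the assembly:

/* The round-up idiom from __head_fr401_set_protection, as plain C.
 * ALIGN_2MIB matches the 1024*2048 constant used in the assembly. */
#include <stdio.h>

#define ALIGN_2MIB (1024UL * 2048UL)

static unsigned long round_up_2mib(unsigned long addr)
{
	return (addr + (ALIGN_2MIB - 1)) & ~(ALIGN_2MIB - 1);
}

int main(void)
{
	/* e.g. an image ending at 0x00123456 rounds up to the 2 MiB mark */
	printf("0x%08lx\n", round_up_2mib(0x00123456UL));	/* 0x00200000 */
	return 0;
}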
diff --git a/arch/frv/kernel/head-uc-fr451.S b/arch/frv/kernel/head-uc-fr451.S
new file mode 100644
index 00000000000..b2a76c4a178
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr451.S
@@ -0,0 +1,174 @@
+/* head-uc-fr451.S: FR451 uc-linux specific bits of initialisation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spr-regs.h>
+#include <asm/mb86943a.h>
+#include "head.inc"
+
+
+#define __400_DBR0 0xfe000e00
+#define __400_DBR1 0xfe000e08
+#define __400_DBR2 0xfe000e10
+#define __400_DBR3 0xfe000e18
+#define __400_DAM0 0xfe000f00
+#define __400_DAM1 0xfe000f08
+#define __400_DAM2 0xfe000f10
+#define __400_DAM3 0xfe000f18
+#define __400_LGCR 0xfe000010
+#define __400_LCR 0xfe000100
+#define __400_LSBR 0xfe000c00
+
+ __INIT
+ .balign 4
+
+###############################################################################
+#
+# set the protection map with the I/DAMPR registers
+#
+# ENTRY: EXIT:
+# GR25 SDRAM size [saved]
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+###############################################################################
+ .globl __head_fr451_set_protection
+__head_fr451_set_protection:
+ movsg lr,gr27
+
+ movgs gr0,dampr10
+ movgs gr0,damlr10
+ movgs gr0,dampr9
+ movgs gr0,damlr9
+ movgs gr0,dampr8
+ movgs gr0,damlr8
+
+ # set the I/O region protection registers for FR401/3/5
+ sethi.p %hi(__region_IO),gr5
+ setlo %lo(__region_IO),gr5
+ sethi.p %hi(0x1fffffff),gr7
+ setlo %lo(0x1fffffff),gr7
+ ori gr5,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
+ movgs gr5,dampr11 ; General I/O tile
+ movgs gr7,damlr11
+
+ # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
+ # - start with the highest numbered registers
+ sethi.p %hi(__kernel_image_end),gr8
+ setlo %lo(__kernel_image_end),gr8
+ sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
+ setlo %lo(32768),gr4
+ add gr8,gr4,gr8
+ sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
+ setlo %lo(1024*2048-1),gr4
+ add.p gr8,gr4,gr8
+ not gr4,gr4
+ and gr8,gr4,gr8
+
+ sethi.p %hi(__page_offset),gr9
+ setlo %lo(__page_offset),gr9
+ add gr9,gr25,gr9
+
+ sethi.p %hi(0xffffc000),gr11
+ setlo %lo(0xffffc000),gr11
+
+ # GR8 = base of uncovered RAM
+ # GR9 = top of uncovered RAM
+ # GR11 = xAMLR mask
+ LEDS 0x3317
+ call __head_split_region
+ movgs gr4,iampr7
+ movgs gr6,iamlr7
+ movgs gr5,dampr7
+ movgs gr7,damlr7
+
+ LEDS 0x3316
+ call __head_split_region
+ movgs gr4,iampr6
+ movgs gr6,iamlr6
+ movgs gr5,dampr6
+ movgs gr7,damlr6
+
+ LEDS 0x3315
+ call __head_split_region
+ movgs gr4,iampr5
+ movgs gr6,iamlr5
+ movgs gr5,dampr5
+ movgs gr7,damlr5
+
+ LEDS 0x3314
+ call __head_split_region
+ movgs gr4,iampr4
+ movgs gr6,iamlr4
+ movgs gr5,dampr4
+ movgs gr7,damlr4
+
+ LEDS 0x3313
+ call __head_split_region
+ movgs gr4,iampr3
+ movgs gr6,iamlr3
+ movgs gr5,dampr3
+ movgs gr7,damlr3
+
+ LEDS 0x3312
+ call __head_split_region
+ movgs gr4,iampr2
+ movgs gr6,iamlr2
+ movgs gr5,dampr2
+ movgs gr7,damlr2
+
+ LEDS 0x3311
+ call __head_split_region
+ movgs gr4,iampr1
+ movgs gr6,iamlr1
+ movgs gr5,dampr1
+ movgs gr7,damlr1
+
+ # cover kernel core image with kernel-only segment
+ LEDS 0x3310
+ sethi.p %hi(__page_offset),gr8
+ setlo %lo(__page_offset),gr8
+ call __head_split_region
+
+#ifdef CONFIG_PROTECT_KERNEL
+ ori.p gr4,#xAMPRx_S_KERNEL,gr4
+ ori gr5,#xAMPRx_S_KERNEL,gr5
+#endif
+
+ movgs gr4,iampr0
+ movgs gr6,iamlr0
+ movgs gr5,dampr0
+ movgs gr7,damlr0
+
+ # start in TLB context 0 with no page tables
+ movgs gr0,cxnr
+ movgs gr0,ttbr
+
+ # the FR451 also has an extra trap base register
+ movsg tbr,gr4
+ movgs gr4,btbr
+
+ # turn on the timers as appropriate
+ movgs gr0,timerh
+ movgs gr0,timerl
+ movgs gr0,timerd
+ movsg hsr0,gr4
+ sethi.p %hi(HSR0_ETMI),gr5
+ setlo %lo(HSR0_ETMI),gr5
+ or gr4,gr5,gr4
+ movgs gr4,hsr0
+
+ LEDS 0x3300
+ jmpl @(gr27,gr0)
diff --git a/arch/frv/kernel/head-uc-fr555.S b/arch/frv/kernel/head-uc-fr555.S
new file mode 100644
index 00000000000..5497aaf34f7
--- /dev/null
+++ b/arch/frv/kernel/head-uc-fr555.S
@@ -0,0 +1,347 @@
+/* head-uc-fr555.S: FR555 uc-linux specific bits of initialisation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spr-regs.h>
+#include <asm/mb86943a.h>
+#include "head.inc"
+
+
+#define __551_DARS0 0xfeff0100
+#define __551_DARS1 0xfeff0104
+#define __551_DARS2 0xfeff0108
+#define __551_DARS3 0xfeff010c
+#define __551_DAMK0 0xfeff0110
+#define __551_DAMK1 0xfeff0114
+#define __551_DAMK2 0xfeff0118
+#define __551_DAMK3 0xfeff011c
+#define __551_LCR 0xfeff1100
+#define __551_LSBR 0xfeff1c00
+
+ __INIT
+ .balign 4
+
+###############################################################################
+#
+# describe the position and layout of the SDRAM controller registers
+#
+# ENTRY: EXIT:
+# GR5 - cacheline size
+# GR11 - displacement of 2nd SDRAM addr reg from GR14
+# GR12 - displacement of 3rd SDRAM addr reg from GR14
+# GR13 - displacement of 4th SDRAM addr reg from GR14
+# GR14 - address of 1st SDRAM addr reg
+# GR15 - amount to shift address by to match SDRAM addr reg
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+# CC0 - T if DARS0 is present
+# CC1 - T if DARS1 is present
+# CC2 - T if DARS2 is present
+# CC3 - T if DARS3 is present
+#
+###############################################################################
+ .globl __head_fr555_describe_sdram
+__head_fr555_describe_sdram:
+ sethi.p %hi(__551_DARS0),gr14
+ setlo %lo(__551_DARS0),gr14
+ setlos.p #__551_DARS1-__551_DARS0,gr11
+ setlos #__551_DARS2-__551_DARS0,gr12
+ setlos.p #__551_DARS3-__551_DARS0,gr13
+ setlos #64,gr5 ; cacheline size
+ setlos #20,gr15 ; amount to shift addr by
+ setlos #0x00ff,gr4
+ movgs gr4,cccr ; extant DARS/DAMK regs
+ bralr
+
+###############################################################################
+#
+# rearrange the bus controller registers
+#
+# ENTRY: EXIT:
+# GR26 &__head_reference [saved]
+# GR30 LED address revised LED address
+#
+###############################################################################
+ .globl __head_fr555_set_busctl
+__head_fr555_set_busctl:
+ LEDS 0x100f
+ sethi.p %hi(__551_LSBR),gr10
+ setlo %lo(__551_LSBR),gr10
+ sethi.p %hi(__551_LCR),gr11
+ setlo %lo(__551_LCR),gr11
+
+ # set the bus controller
+ sethi.p %hi(__region_CS1),gr4
+ setlo %lo(__region_CS1),gr4
+ sethi.p %hi(__region_CS1_M),gr5
+ setlo %lo(__region_CS1_M),gr5
+ sethi.p %hi(__region_CS1_C),gr6
+ setlo %lo(__region_CS1_C),gr6
+ sti gr4,@(gr10,#1*0x08)
+ sti gr5,@(gr10,#1*0x08+0x100)
+ sti gr6,@(gr11,#1*0x08)
+ sethi.p %hi(__region_CS2),gr4
+ setlo %lo(__region_CS2),gr4
+ sethi.p %hi(__region_CS2_M),gr5
+ setlo %lo(__region_CS2_M),gr5
+ sethi.p %hi(__region_CS2_C),gr6
+ setlo %lo(__region_CS2_C),gr6
+ sti gr4,@(gr10,#2*0x08)
+ sti gr5,@(gr10,#2*0x08+0x100)
+ sti gr6,@(gr11,#2*0x08)
+ sethi.p %hi(__region_CS3),gr4
+ setlo %lo(__region_CS3),gr4
+ sethi.p %hi(__region_CS3_M),gr5
+ setlo %lo(__region_CS3_M),gr5
+ sethi.p %hi(__region_CS3_C),gr6
+ setlo %lo(__region_CS3_C),gr6
+ sti gr4,@(gr10,#3*0x08)
+ sti gr5,@(gr10,#3*0x08+0x100)
+ sti gr6,@(gr11,#3*0x08)
+ sethi.p %hi(__region_CS4),gr4
+ setlo %lo(__region_CS4),gr4
+ sethi.p %hi(__region_CS4_M),gr5
+ setlo %lo(__region_CS4_M),gr5
+ sethi.p %hi(__region_CS4_C),gr6
+ setlo %lo(__region_CS4_C),gr6
+ sti gr4,@(gr10,#4*0x08)
+ sti gr5,@(gr10,#4*0x08+0x100)
+ sti gr6,@(gr11,#4*0x08)
+ sethi.p %hi(__region_CS5),gr4
+ setlo %lo(__region_CS5),gr4
+ sethi.p %hi(__region_CS5_M),gr5
+ setlo %lo(__region_CS5_M),gr5
+ sethi.p %hi(__region_CS5_C),gr6
+ setlo %lo(__region_CS5_C),gr6
+ sti gr4,@(gr10,#5*0x08)
+ sti gr5,@(gr10,#5*0x08+0x100)
+ sti gr6,@(gr11,#5*0x08)
+ sethi.p %hi(__region_CS6),gr4
+ setlo %lo(__region_CS6),gr4
+ sethi.p %hi(__region_CS6_M),gr5
+ setlo %lo(__region_CS6_M),gr5
+ sethi.p %hi(__region_CS6_C),gr6
+ setlo %lo(__region_CS6_C),gr6
+ sti gr4,@(gr10,#6*0x08)
+ sti gr5,@(gr10,#6*0x08+0x100)
+ sti gr6,@(gr11,#6*0x08)
+ sethi.p %hi(__region_CS7),gr4
+ setlo %lo(__region_CS7),gr4
+ sethi.p %hi(__region_CS7_M),gr5
+ setlo %lo(__region_CS7_M),gr5
+ sethi.p %hi(__region_CS7_C),gr6
+ setlo %lo(__region_CS7_C),gr6
+ sti gr4,@(gr10,#7*0x08)
+ sti gr5,@(gr10,#7*0x08+0x100)
+ sti gr6,@(gr11,#7*0x08)
+ membar
+ bar
+
+ # adjust LED bank address
+#ifdef CONFIG_MB93091_VDK
+ sethi.p %hi(LED_ADDR - 0x20000000 +__region_CS2),gr30
+ setlo %lo(LED_ADDR - 0x20000000 +__region_CS2),gr30
+#endif
+ bralr
+
+###############################################################################
+#
+# determine the total SDRAM size
+#
+# ENTRY: EXIT:
+# GR25 - SDRAM size
+# GR26 &__head_reference [saved]
+# GR30 LED address [saved]
+#
+###############################################################################
+ .globl __head_fr555_survey_sdram
+__head_fr555_survey_sdram:
+ sethi.p %hi(__551_DAMK0),gr11
+ setlo %lo(__551_DAMK0),gr11
+ sethi.p %hi(__551_DARS0),gr12
+ setlo %lo(__551_DARS0),gr12
+
+ sethi.p %hi(0xfff),gr17 ; unused SDRAM AMK value
+ setlo %lo(0xfff),gr17
+ setlos #0,gr25
+
+ ldi @(gr11,#0x00),gr6 ; DAMK0: bits 11:0 match addr 11:0
+ subcc gr6,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS0
+ ldi @(gr12,#0x00),gr4 ; DARS0
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS0:
+
+ ldi @(gr11,#0x04),gr6 ; DAMK1: bits 11:0 match addr 11:0
+ subcc gr6,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS1
+ ldi @(gr12,#0x04),gr4 ; DARS1
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS1:
+
+ ldi @(gr11,#0x8),gr6 ; DAMK2: bits 11:0 match addr 11:0
+ subcc gr6,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS2
+ ldi @(gr12,#0x8),gr4 ; DARS2
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS2:
+
+ ldi @(gr11,#0xc),gr6 ; DAMK3: bits 11:0 match addr 11:0
+ subcc gr6,gr17,gr0,icc0
+ beq icc0,#0,__head_no_DCS3
+ ldi @(gr12,#0xc),gr4 ; DARS3
+ add gr25,gr6,gr25
+ addi gr25,#1,gr25
+__head_no_DCS3:
+
+ slli gr25,#20,gr25 ; shift [11:0] -> [31:20]
+ bralr
+
+###############################################################################
+#
+# set the protection map with the I/DAMPR registers
+#
+# ENTRY: EXIT:
+# GR25 SDRAM size saved
+# GR30 LED address saved
+#
+###############################################################################
+ .globl __head_fr555_set_protection
+__head_fr555_set_protection:
+ movsg lr,gr27
+
+ sethi.p %hi(0xfff00000),gr11
+ setlo %lo(0xfff00000),gr11
+
+ # set the I/O region protection registers for FR555
+ sethi.p %hi(__region_IO),gr7
+ setlo %lo(__region_IO),gr7
+ ori gr7,#xAMPRx_SS_512Mb|xAMPRx_S_KERNEL|xAMPRx_C|xAMPRx_V,gr5
+ movgs gr0,iampr15
+ movgs gr0,iamlr15
+ movgs gr5,dampr15
+ movgs gr7,damlr15
+
+ # need to tile the remaining IAMPR/DAMPR registers to cover as much of the RAM as possible
+ # - start with the highest numbered registers
+ sethi.p %hi(__kernel_image_end),gr8
+ setlo %lo(__kernel_image_end),gr8
+ sethi.p %hi(32768),gr4 ; allow for a maximal allocator bitmap
+ setlo %lo(32768),gr4
+ add gr8,gr4,gr8
+ sethi.p %hi(1024*2048-1),gr4 ; round up to nearest 2MiB
+ setlo %lo(1024*2048-1),gr4
+ add.p gr8,gr4,gr8
+ not gr4,gr4
+ and gr8,gr4,gr8
+
+ sethi.p %hi(__page_offset),gr9
+ setlo %lo(__page_offset),gr9
+ add gr9,gr25,gr9
+
+ # GR8 = base of uncovered RAM
+ # GR9 = top of uncovered RAM
+ # GR11 - mask for DAMLR/IAMLR regs
+ #
+ call __head_split_region
+ movgs gr4,iampr14
+ movgs gr6,iamlr14
+ movgs gr5,dampr14
+ movgs gr7,damlr14
+ call __head_split_region
+ movgs gr4,iampr13
+ movgs gr6,iamlr13
+ movgs gr5,dampr13
+ movgs gr7,damlr13
+ call __head_split_region
+ movgs gr4,iampr12
+ movgs gr6,iamlr12
+ movgs gr5,dampr12
+ movgs gr7,damlr12
+ call __head_split_region
+ movgs gr4,iampr11
+ movgs gr6,iamlr11
+ movgs gr5,dampr11
+ movgs gr7,damlr11
+ call __head_split_region
+ movgs gr4,iampr10
+ movgs gr6,iamlr10
+ movgs gr5,dampr10
+ movgs gr7,damlr10
+ call __head_split_region
+ movgs gr4,iampr9
+ movgs gr6,iamlr9
+ movgs gr5,dampr9
+ movgs gr7,damlr9
+ call __head_split_region
+ movgs gr4,iampr8
+ movgs gr6,iamlr8
+ movgs gr5,dampr8
+ movgs gr7,damlr8
+
+ call __head_split_region
+ movgs gr4,iampr7
+ movgs gr6,iamlr7
+ movgs gr5,dampr7
+ movgs gr7,damlr7
+ call __head_split_region
+ movgs gr4,iampr6
+ movgs gr6,iamlr6
+ movgs gr5,dampr6
+ movgs gr7,damlr6
+ call __head_split_region
+ movgs gr4,iampr5
+ movgs gr6,iamlr5
+ movgs gr5,dampr5
+ movgs gr7,damlr5
+ call __head_split_region
+ movgs gr4,iampr4
+ movgs gr6,iamlr4
+ movgs gr5,dampr4
+ movgs gr7,damlr4
+ call __head_split_region
+ movgs gr4,iampr3
+ movgs gr6,iamlr3
+ movgs gr5,dampr3
+ movgs gr7,damlr3
+ call __head_split_region
+ movgs gr4,iampr2
+ movgs gr6,iamlr2
+ movgs gr5,dampr2
+ movgs gr7,damlr2
+ call __head_split_region
+ movgs gr4,iampr1
+ movgs gr6,iamlr1
+ movgs gr5,dampr1
+ movgs gr7,damlr1
+
+ # cover kernel core image with kernel-only segment
+ sethi.p %hi(__page_offset),gr8
+ setlo %lo(__page_offset),gr8
+ call __head_split_region
+
+#ifdef CONFIG_PROTECT_KERNEL
+ ori.p gr4,#xAMPRx_S_KERNEL,gr4
+ ori gr5,#xAMPRx_S_KERNEL,gr5
+#endif
+
+ movgs gr4,iampr0
+ movgs gr6,iamlr0
+ movgs gr5,dampr0
+ movgs gr7,damlr0
+ jmpl @(gr27,gr0)
diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S
new file mode 100644
index 00000000000..e9a8cc63ac9
--- /dev/null
+++ b/arch/frv/kernel/head.S
@@ -0,0 +1,643 @@
+/* head.S: kernel entry point for FR-V kernel
+ *
+ * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spr-regs.h>
+#include <asm/mb86943a.h>
+#include <asm/cache.h>
+#include "head.inc"
+
+###############################################################################
+#
+# void _boot(unsigned long magic, char *command_line) __attribute__((noreturn))
+#
+# - if magic is 0xdead1eaf, then command_line is assumed to point to the kernel
+# command line string
+#
+###############################################################################
+ __HEAD
+ .balign 4
+
+ .globl _boot, __head_reference
+ .type _boot,@function
+_boot:
+__head_reference:
+ sethi.p %hi(LED_ADDR),gr30
+ setlo %lo(LED_ADDR),gr30
+
+ LEDS 0x0000
+
+ # calculate reference address for PC-relative stuff
+ call 0f
+0: movsg lr,gr26
+ addi gr26,#__head_reference-0b,gr26
+
+ # invalidate and disable both of the caches and turn off the memory access checking
+ dcef @(gr0,gr0),1
+ bar
+
+ sethi.p %hi(~(HSR0_ICE|HSR0_DCE|HSR0_CBM|HSR0_EIMMU|HSR0_EDMMU)),gr4
+ setlo %lo(~(HSR0_ICE|HSR0_DCE|HSR0_CBM|HSR0_EIMMU|HSR0_EDMMU)),gr4
+ movsg hsr0,gr5
+ and gr4,gr5,gr5
+ movgs gr5,hsr0
+ movsg hsr0,gr5
+
+ LEDS 0x0001
+
+ icei @(gr0,gr0),1
+ dcei @(gr0,gr0),1
+ bar
+
+ # turn the instruction cache back on
+ sethi.p %hi(HSR0_ICE),gr4
+ setlo %lo(HSR0_ICE),gr4
+ movsg hsr0,gr5
+ or gr4,gr5,gr5
+ movgs gr5,hsr0
+ movsg hsr0,gr5
+
+ bar
+
+ LEDS 0x0002
+
+ # retrieve the parameters (including command line) before we overwrite them
+ sethi.p %hi(0xdead1eaf),gr7
+ setlo %lo(0xdead1eaf),gr7
+ subcc gr7,gr8,gr0,icc0
+ bne icc0,#0,__head_no_parameters
+
+ sethi.p %hi(redboot_command_line-1),gr6
+ setlo %lo(redboot_command_line-1),gr6
+ sethi.p %hi(__head_reference),gr4
+ setlo %lo(__head_reference),gr4
+ sub gr6,gr4,gr6
+ add.p gr6,gr26,gr6
+ subi gr9,#1,gr9
+ setlos.p #511,gr4
+ setlos #1,gr5
+
+__head_copy_cmdline:
+ ldubu.p @(gr9,gr5),gr16
+ subicc gr4,#1,gr4,icc0
+ stbu.p gr16,@(gr6,gr5)
+ subicc gr16,#0,gr0,icc1
+ bls icc0,#0,__head_end_cmdline
+ bne icc1,#1,__head_copy_cmdline
+__head_end_cmdline:
+ stbu gr0,@(gr6,gr5)
+__head_no_parameters:
+
+###############################################################################
+#
+# we need to relocate the SDRAM to 0x00000000 (linux) or 0xC0000000 (uClinux)
+# - note that we're going to have to run entirely out of the icache whilst
+# fiddling with the SDRAM controller registers
+#
+###############################################################################
+#ifdef CONFIG_MMU
+ call __head_fr451_describe_sdram
+
+#else
+ movsg psr,gr5
+ srli gr5,#28,gr5
+ subicc gr5,#3,gr0,icc0
+ beq icc0,#0,__head_fr551_sdram
+
+ call __head_fr401_describe_sdram
+ bra __head_do_sdram
+
+__head_fr551_sdram:
+ call __head_fr555_describe_sdram
+ LEDS 0x000d
+
+__head_do_sdram:
+#endif
+
+ # preload the registers with invalid values in case any DBR/DARS are marked not present
+ sethi.p %hi(0xfe000000),gr17 ; unused SDRAM DBR value
+ setlo %lo(0xfe000000),gr17
+ or.p gr17,gr0,gr20
+ or gr17,gr0,gr21
+ or.p gr17,gr0,gr22
+ or gr17,gr0,gr23
+
+ # consult the SDRAM controller CS address registers
+ cld @(gr14,gr0 ),gr20, cc0,#1 ; DBR0 / DARS0
+ cld @(gr14,gr11),gr21, cc1,#1 ; DBR1 / DARS1
+ cld @(gr14,gr12),gr22, cc2,#1 ; DBR2 / DARS2
+ cld.p @(gr14,gr13),gr23, cc3,#1 ; DBR3 / DARS3
+
+ sll gr20,gr15,gr20 ; shift values up for FR551
+ sll gr21,gr15,gr21
+ sll gr22,gr15,gr22
+ sll gr23,gr15,gr23
+
+ LEDS 0x0003
+
+ # assume the lowest valid CS line to be the SDRAM base and get its address
+ subcc gr20,gr17,gr0,icc0
+ subcc.p gr21,gr17,gr0,icc1
+ subcc gr22,gr17,gr0,icc2
+ subcc.p gr23,gr17,gr0,icc3
+ ckne icc0,cc4 ; T if DBR0 != 0xfe000000
+ ckne icc1,cc5
+ ckne icc2,cc6
+ ckne icc3,cc7
+ cor gr23,gr0,gr24, cc7,#1 ; GR24 = SDRAM base
+ cor gr22,gr0,gr24, cc6,#1
+ cor gr21,gr0,gr24, cc5,#1
+ cor gr20,gr0,gr24, cc4,#1
+
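The ckne/cor chain above selects the base address of the lowest-numbered SDRAM bank whose DBR/DARS register is programmed: each conditional move overwrites GR24, and the moves are applied from bank 3 down to bank 0, so the lowest valid bank wins. A rough C equivalent, assuming 0xfe000000 marks an unused bank as in the preload above; lowest_bank_base() is invented for the example.

/* Sketch in C of the conditional-move selection above: pick the base of
 * the lowest-numbered SDRAM bank whose DBR register is in use. */
#include <stdio.h>

#define UNUSED_DBR 0xfe000000UL

static unsigned long lowest_bank_base(const unsigned long dbr[4])
{
	unsigned long base = UNUSED_DBR;
	int i;

	/* Mirror the cor chain: bank 3 is considered first, so earlier
	 * (lower-numbered) banks override it. */
	for (i = 3; i >= 0; i--)
		if (dbr[i] != UNUSED_DBR)
			base = dbr[i];
	return base;
}

int main(void)
{
	unsigned long dbr[4] = { UNUSED_DBR, 0x04000000UL, UNUSED_DBR, UNUSED_DBR };

	printf("0x%08lx\n", lowest_bank_base(dbr));	/* prints 0x04000000 */
	return 0;
}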
+ # calculate the displacement required to get the SDRAM into the right place in memory
+ sethi.p %hi(__sdram_base),gr16
+ setlo %lo(__sdram_base),gr16
+ sub gr16,gr24,gr16 ; delta = __sdram_base - DBRx
+
+ # calculate the new values to go in the controller regs
+ cadd.p gr20,gr16,gr20, cc4,#1 ; DCS#0 (new) = DCS#0 (old) + delta
+ cadd gr21,gr16,gr21, cc5,#1
+ cadd.p gr22,gr16,gr22, cc6,#1
+ cadd gr23,gr16,gr23, cc7,#1
+
+ srl gr20,gr15,gr20 ; shift values down for FR551
+ srl gr21,gr15,gr21
+ srl gr22,gr15,gr22
+ srl gr23,gr15,gr23
+
+ # work out the address at which the reg updater resides and lock it into icache
+ # also work out the address the updater will jump to when finished
+ sethi.p %hi(__head_move_sdram-__head_reference),gr18
+ setlo %lo(__head_move_sdram-__head_reference),gr18
+ sethi.p %hi(__head_sdram_moved-__head_reference),gr19
+ setlo %lo(__head_sdram_moved-__head_reference),gr19
+ add.p gr18,gr26,gr18
+ add gr19,gr26,gr19
+ add.p gr19,gr16,gr19 ; moved = addr + (__sdram_base - DBRx)
+ add gr18,gr5,gr4 ; two cachelines probably required
+
+ icpl gr18,gr0,#1 ; load and lock the cachelines
+ icpl gr4,gr0,#1
+ LEDS 0x0004
+ membar
+ bar
+ jmpl @(gr18,gr0)
+
+ .balign L1_CACHE_BYTES
+__head_move_sdram:
+ cst gr20,@(gr14,gr0 ), cc4,#1
+ cst gr21,@(gr14,gr11), cc5,#1
+ cst gr22,@(gr14,gr12), cc6,#1
+ cst gr23,@(gr14,gr13), cc7,#1
+ cld @(gr14,gr0 ),gr20, cc4,#1
+ cld @(gr14,gr11),gr21, cc5,#1
+	cld	@(gr14,gr12),gr22,	cc6,#1
+ cld @(gr14,gr13),gr23, cc7,#1
+ bar
+ membar
+ jmpl @(gr19,gr0)
+
+ .balign L1_CACHE_BYTES
+__head_sdram_moved:
+ icul gr18
+ add gr18,gr5,gr4
+ icul gr4
+ icei @(gr0,gr0),1
+ dcei @(gr0,gr0),1
+
+ LEDS 0x0005
+
+ # recalculate reference address
+ call 0f
+0: movsg lr,gr26
+ addi gr26,#__head_reference-0b,gr26
+
+
+###############################################################################
+#
+# move the kernel image down to the bottom of the SDRAM
+#
+###############################################################################
+ sethi.p %hi(__kernel_image_size_no_bss+15),gr4
+ setlo %lo(__kernel_image_size_no_bss+15),gr4
+ srli.p gr4,#4,gr4 ; count
+ or gr26,gr26,gr16 ; source
+
+ sethi.p %hi(__sdram_base),gr17 ; destination
+ setlo %lo(__sdram_base),gr17
+
+ setlos #8,gr5
+ sub.p gr16,gr5,gr16 ; adjust src for LDDU
+ sub gr17,gr5,gr17 ; adjust dst for LDDU
+
+ sethi.p %hi(__head_move_kernel-__head_reference),gr18
+ setlo %lo(__head_move_kernel-__head_reference),gr18
+ sethi.p %hi(__head_kernel_moved-__head_reference+__sdram_base),gr19
+ setlo %lo(__head_kernel_moved-__head_reference+__sdram_base),gr19
+ add gr18,gr26,gr18
+ icpl gr18,gr0,#1
+ jmpl @(gr18,gr0)
+
+ .balign 32
+__head_move_kernel:
+ lddu @(gr16,gr5),gr10
+ lddu @(gr16,gr5),gr12
+ stdu.p gr10,@(gr17,gr5)
+ subicc gr4,#1,gr4,icc0
+ stdu.p gr12,@(gr17,gr5)
+ bhi icc0,#0,__head_move_kernel
+ jmpl @(gr19,gr0)
+
+ .balign 32
+__head_kernel_moved:
+ icul gr18
+ icei @(gr0,gr0),1
+ dcei @(gr0,gr0),1
+
+ LEDS 0x0006
+
+ # recalculate reference address
+ call 0f
+0: movsg lr,gr26
+ addi gr26,#__head_reference-0b,gr26
+
+
+###############################################################################
+#
+# rearrange the iomem map and set the protection registers
+#
+###############################################################################
+
+#ifdef CONFIG_MMU
+ LEDS 0x3301
+ call __head_fr451_set_busctl
+ LEDS 0x3303
+ call __head_fr451_survey_sdram
+ LEDS 0x3305
+ call __head_fr451_set_protection
+
+#else
+ movsg psr,gr5
+ srli gr5,#PSR_IMPLE_SHIFT,gr5
+ subicc gr5,#PSR_IMPLE_FR551,gr0,icc0
+ beq icc0,#0,__head_fr555_memmap
+ subicc gr5,#PSR_IMPLE_FR451,gr0,icc0
+ beq icc0,#0,__head_fr451_memmap
+
+ LEDS 0x3101
+ call __head_fr401_set_busctl
+ LEDS 0x3103
+ call __head_fr401_survey_sdram
+ LEDS 0x3105
+ call __head_fr401_set_protection
+ bra __head_done_memmap
+
+__head_fr451_memmap:
+ LEDS 0x3301
+ call __head_fr401_set_busctl
+ LEDS 0x3303
+ call __head_fr401_survey_sdram
+ LEDS 0x3305
+ call __head_fr451_set_protection
+ bra __head_done_memmap
+
+__head_fr555_memmap:
+ LEDS 0x3501
+ call __head_fr555_set_busctl
+ LEDS 0x3503
+ call __head_fr555_survey_sdram
+ LEDS 0x3505
+ call __head_fr555_set_protection
+
+__head_done_memmap:
+#endif
+ LEDS 0x0007
+
+###############################################################################
+#
+# turn the data cache and MMU on
+# - for the FR451 this'll mean that the window through which the kernel is
+# viewed will change
+#
+###############################################################################
+
+#ifdef CONFIG_MMU
+#define MMUMODE HSR0_EIMMU|HSR0_EDMMU|HSR0_EXMMU|HSR0_EDAT|HSR0_XEDAT
+#else
+#define MMUMODE HSR0_EIMMU|HSR0_EDMMU
+#endif
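+	# (both variants enable the insn and data MMUs; the address translation
+	# bits are only added when CONFIG_MMU is configured)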
+
+ movsg hsr0,gr5
+
+ sethi.p %hi(MMUMODE),gr4
+ setlo %lo(MMUMODE),gr4
+ or gr4,gr5,gr5
+
+#if defined(CONFIG_FRV_DEFL_CACHE_WTHRU)
+ sethi.p %hi(HSR0_DCE|HSR0_CBM_WRITE_THRU),gr4
+ setlo %lo(HSR0_DCE|HSR0_CBM_WRITE_THRU),gr4
+#elif defined(CONFIG_FRV_DEFL_CACHE_WBACK)
+ sethi.p %hi(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
+ setlo %lo(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
+#elif defined(CONFIG_FRV_DEFL_CACHE_WBEHIND)
+ sethi.p %hi(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
+ setlo %lo(HSR0_DCE|HSR0_CBM_COPY_BACK),gr4
+
+ movsg psr,gr6
+ srli gr6,#24,gr6
+ cmpi gr6,#0x50,icc0 // FR451
+ beq icc0,#0,0f
+ cmpi gr6,#0x40,icc0 // FR405
+ bne icc0,#0,1f
+0:
+ # turn off write-allocate
+ sethi.p %hi(HSR0_NWA),gr6
+ setlo %lo(HSR0_NWA),gr6
+ or gr4,gr6,gr4
+1:
+
+#else
+#error No default cache configuration set
+#endif
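+	# GR4 now holds the dcache enable bit plus the selected write mode; in
+	# the write-behind case write-allocation is also disabled (HSR0_NWA) on
+	# the FR405 and FR451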
+
+ or gr4,gr5,gr5
+ movgs gr5,hsr0
+ bar
+
+ LEDS 0x0008
+
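+	# continue at the linked address of __head_mmu_enabled: now that HSR0
+	# has been written, the window through which the kernel is fetched may
+	# just have changed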
+ sethi.p %hi(__head_mmu_enabled),gr19
+ setlo %lo(__head_mmu_enabled),gr19
+ jmpl @(gr19,gr0)
+
+__head_mmu_enabled:
+ icei @(gr0,gr0),#1
+ dcei @(gr0,gr0),#1
+
+ LEDS 0x0009
+
+#ifdef CONFIG_MMU
+ call __head_fr451_finalise_protection
+#endif
+
+ LEDS 0x000a
+
+###############################################################################
+#
+# set up the runtime environment
+#
+###############################################################################
+
+ # clear the BSS area
+ sethi.p %hi(__bss_start),gr4
+ setlo %lo(__bss_start),gr4
+ sethi.p %hi(_end),gr5
+ setlo %lo(_end),gr5
+ or.p gr0,gr0,gr18
+ or gr0,gr0,gr19
+
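+	# each pass stores the zeroed GR18/GR19 pair four times, clearing 32
+	# bytes per iteration until GR4 reaches _end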
+0:
+ stdi gr18,@(gr4,#0)
+ stdi gr18,@(gr4,#8)
+ stdi gr18,@(gr4,#16)
+ stdi.p gr18,@(gr4,#24)
+ addi gr4,#32,gr4
+ subcc gr5,gr4,gr0,icc0
+ bhi icc0,#2,0b
+
+ LEDS 0x000b
+
+ # save the SDRAM details
+ sethi.p %hi(__sdram_old_base),gr4
+ setlo %lo(__sdram_old_base),gr4
+ st gr24,@(gr4,gr0)
+
+ sethi.p %hi(__sdram_base),gr5
+ setlo %lo(__sdram_base),gr5
+ sethi.p %hi(memory_start),gr4
+ setlo %lo(memory_start),gr4
+ st gr5,@(gr4,gr0)
+
+ add gr25,gr5,gr25
+ sethi.p %hi(memory_end),gr4
+ setlo %lo(memory_end),gr4
+ st gr25,@(gr4,gr0)
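+	# i.e. memory_start = __sdram_base and memory_end = __sdram_base plus
+	# the SDRAM size accumulated in GR25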
+
+ # point the TBR at the kernel trap table
+ sethi.p %hi(__entry_kerneltrap_table),gr4
+ setlo %lo(__entry_kerneltrap_table),gr4
+ movgs gr4,tbr
+
+ # set up the exception frame for init
+ sethi.p %hi(__kernel_frame0_ptr),gr28
+ setlo %lo(__kernel_frame0_ptr),gr28
+ sethi.p %hi(_gp),gr16
+ setlo %lo(_gp),gr16
+ sethi.p %hi(__entry_usertrap_table),gr4
+ setlo %lo(__entry_usertrap_table),gr4
+
+ lddi @(gr28,#0),gr28 ; load __frame & current
+ ldi.p @(gr29,#4),gr15 ; set current_thread
+
+ or gr0,gr0,fp
+ or gr28,gr0,sp
+
+ sti.p gr4,@(gr28,REG_TBR)
+ setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
+ movgs gr5,isr
+
+ # turn on and off various CPU services
+ movsg psr,gr22
+ sethi.p %hi(#PSR_EM|PSR_EF|PSR_CM|PSR_NEM),gr4
+ setlo %lo(#PSR_EM|PSR_EF|PSR_CM|PSR_NEM),gr4
+ or gr22,gr4,gr22
+ movgs gr22,psr
+
+ andi gr22,#~(PSR_PIL|PSR_PS|PSR_S),gr22
+ ori gr22,#PSR_ET,gr22
+ sti gr22,@(gr28,REG_PSR)
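+	# (the frame is thus given the user trap table pointer and a PSR image
+	# with the S, PS and PIL bits cleared and ET set)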
+
+
+###############################################################################
+#
+# set up the registers and jump into the kernel
+#
+###############################################################################
+
+ LEDS 0x000c
+
+ # initialise the processor and the peripherals
+ #call SYMBOL_NAME(processor_init)
+ #call SYMBOL_NAME(unit_init)
+ #LEDS 0x0aff
+
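+	# fill most of the general registers with a recognisable marker value
+	# (GR30 is left alone as it still holds the LED address)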
+ sethi.p #0xe5e5,gr3
+ setlo #0xe5e5,gr3
+ or.p gr3,gr0,gr4
+ or gr3,gr0,gr5
+ or.p gr3,gr0,gr6
+ or gr3,gr0,gr7
+ or.p gr3,gr0,gr8
+ or gr3,gr0,gr9
+ or.p gr3,gr0,gr10
+ or gr3,gr0,gr11
+ or.p gr3,gr0,gr12
+ or gr3,gr0,gr13
+ or.p gr3,gr0,gr14
+ or gr3,gr0,gr17
+ or.p gr3,gr0,gr18
+ or gr3,gr0,gr19
+ or.p gr3,gr0,gr20
+ or gr3,gr0,gr21
+ or.p gr3,gr0,gr23
+ or gr3,gr0,gr24
+ or.p gr3,gr0,gr25
+ or gr3,gr0,gr26
+ or.p gr3,gr0,gr27
+# or gr3,gr0,gr30
+ or gr3,gr0,gr31
+ movgs gr0,lr
+ movgs gr0,lcr
+ movgs gr0,ccr
+ movgs gr0,cccr
+
+ # initialise the virtual interrupt handling
+ subcc gr0,gr0,gr0,icc2 /* set Z, clear C */
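+	# (the irqflags code tracks virtual interrupt disablement in ICC2's Z
+	# and C flags rather than touching PSR.PIL every time)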
+
+#ifdef CONFIG_MMU
+ movgs gr3,scr2
+ movgs gr3,scr3
+#endif
+
+ LEDS 0x0fff
+
+ # invoke the debugging stub if present
+ # - arch/frv/kernel/debug-stub.c will shift control directly to init/main.c
+ # (it will not return here)
+ break
+ .globl __debug_stub_init_break
+__debug_stub_init_break:
+
+ # however, if you need to use an ICE, and don't care about using any userspace
+ # debugging tools (such as the ptrace syscall), you can just step over the break
+ # above and get to the kernel this way
+ # look at arch/frv/kernel/debug-stub.c: debug_stub_init() to see what you've missed
+ call start_kernel
+
+ .globl __head_end
+__head_end:
+ .size _boot, .-_boot
+
+ # provide a point for GDB to place a break
+ .section .text..start,"ax"
+ .globl _start
+ .balign 4
+_start:
+ call _boot
+
+ .previous
+###############################################################################
+#
+# split a tile off of the region defined by GR8-GR9
+#
+# ENTRY: EXIT:
+# GR4 - IAMPR value representing tile
+# GR5 - DAMPR value representing tile
+# GR6 - IAMLR value representing tile
+# GR7 - DAMLR value representing tile
+# GR8 region base pointer [saved]
+# GR9 region top pointer updated to exclude new tile
+# GR11 xAMLR mask [saved]
+# GR25 SDRAM size [saved]
+# GR30 LED address [saved]
+#
+# - GR8 and GR9 should be rounded up/down to the nearest megabyte before calling
+#
+###############################################################################
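+# e.g. with a 3MB region the scan finds bit 21, so a 2MB tile is carved off
+# the top of the region, GR9 drops by 2MB and the xAMPR size field becomes
+# (21 - 17) << 4 = 0x40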
+ .globl __head_split_region
+ .type __head_split_region,@function
+__head_split_region:
+ subcc.p gr9,gr8,gr4,icc0
+ setlos #31,gr5
+ scan.p gr4,gr0,gr6
+ beq icc0,#0,__head_region_empty
+ sub.p gr5,gr6,gr6 ; bit number of highest set bit (1MB=>20)
+ setlos #1,gr4
+ sll.p gr4,gr6,gr4 ; size of region (1 << bitno)
+ subi gr6,#17,gr6 ; 1MB => 0x03
+ slli.p gr6,#4,gr6 ; 1MB => 0x30
+ sub gr9,gr4,gr9 ; move uncovered top down
+
+ or gr9,gr6,gr4
+ ori gr4,#xAMPRx_S_USER|xAMPRx_C_CACHED|xAMPRx_V,gr4
+ or.p gr4,gr0,gr5
+
+ and gr4,gr11,gr6
+ and.p gr5,gr11,gr7
+ bralr
+
+__head_region_empty:
+ or.p gr0,gr0,gr4
+ or gr0,gr0,gr5
+ or.p gr0,gr0,gr6
+ or gr0,gr0,gr7
+ bralr
+ .size __head_split_region, .-__head_split_region
+
+###############################################################################
+#
+# write the 32-bit hex number in GR8 to ttyS0
+#
+###############################################################################
+#if 0
+ .globl __head_write_to_ttyS0
+ .type __head_write_to_ttyS0,@function
+__head_write_to_ttyS0:
+ sethi.p %hi(0xfeff9c00),gr31
+ setlo %lo(0xfeff9c00),gr31
+ setlos #8,gr20
+
+0: ldubi @(gr31,#5*8),gr21
+ andi gr21,#0x60,gr21
+ subicc gr21,#0x60,gr21,icc0
+ bne icc0,#0,0b
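+	# (the loop above polls the line status register, the UART registers
+	# being spaced eight bytes apart, until the transmit holding and shift
+	# registers are both empty)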
+
+1: srli gr8,#28,gr21
+ slli gr8,#4,gr8
+
+ addi gr21,#'0',gr21
+ subicc gr21,#'9',gr0,icc0
+ bls icc0,#2,2f
+ addi gr21,#'A'-'0'-10,gr21
+2: