author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile        |   38
-rw-r--r--  arch/arm/kernel/apm.c           |  610
-rw-r--r--  arch/arm/kernel/arch.c          |   46
-rw-r--r--  arch/arm/kernel/armksyms.c      |  175
-rw-r--r--  arch/arm/kernel/arthur.c        |   91
-rw-r--r--  arch/arm/kernel/asm-offsets.c   |   83
-rw-r--r--  arch/arm/kernel/bios32.c        |  699
-rw-r--r--  arch/arm/kernel/calls.S         |  335
-rw-r--r--  arch/arm/kernel/compat.c        |  225
-rw-r--r--  arch/arm/kernel/debug.S         |  106
-rw-r--r--  arch/arm/kernel/dma-isa.c       |  207
-rw-r--r--  arch/arm/kernel/dma.c           |  302
-rw-r--r--  arch/arm/kernel/ecard.c         | 1210
-rw-r--r--  arch/arm/kernel/entry-armv.S    |  745
-rw-r--r--  arch/arm/kernel/entry-common.S  |  260
-rw-r--r--  arch/arm/kernel/entry-header.S  |  182
-rw-r--r--  arch/arm/kernel/fiq.c           |  181
-rw-r--r--  arch/arm/kernel/head.S          |  516
-rw-r--r--  arch/arm/kernel/init_task.c     |   44
-rw-r--r--  arch/arm/kernel/io.c            |   51
-rw-r--r--  arch/arm/kernel/irq.c           | 1038
-rw-r--r--  arch/arm/kernel/isa.c           |   53
-rw-r--r--  arch/arm/kernel/iwmmxt.S        |  320
-rw-r--r--  arch/arm/kernel/module.c        |  152
-rw-r--r--  arch/arm/kernel/process.c       |  460
-rw-r--r--  arch/arm/kernel/ptrace.c        |  861
-rw-r--r--  arch/arm/kernel/ptrace.h        |   12
-rw-r--r--  arch/arm/kernel/semaphore.c     |  220
-rw-r--r--  arch/arm/kernel/setup.c         |  875
-rw-r--r--  arch/arm/kernel/signal.c        |  748
-rw-r--r--  arch/arm/kernel/smp.c           |  396
-rw-r--r--  arch/arm/kernel/sys_arm.c       |  332
-rw-r--r--  arch/arm/kernel/time.c          |  402
-rw-r--r--  arch/arm/kernel/traps.c         |  590
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S   |  166
35 files changed, 12731 insertions, 0 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
new file mode 100644
index 00000000000..07a56ff6149
--- /dev/null
+++ b/arch/arm/kernel/Makefile
@@ -0,0 +1,38 @@
+#
+# Makefile for the linux kernel.
+#
+
+AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR)
+
+# Object file lists.
+
+obj-y := arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
+ process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
+ time.o traps.o
+
+obj-$(CONFIG_APM) += apm.o
+obj-$(CONFIG_ARCH_ACORN) += ecard.o
+obj-$(CONFIG_FOOTBRIDGE) += isa.o
+obj-$(CONFIG_FIQ) += fiq.o
+obj-$(CONFIG_MODULES) += armksyms.o module.o
+obj-$(CONFIG_ARTHUR) += arthur.o
+obj-$(CONFIG_ISA_DMA) += dma-isa.o
+obj-$(CONFIG_PCI) += bios32.o
+obj-$(CONFIG_SMP) += smp.o
+
+obj-$(CONFIG_IWMMXT) += iwmmxt.o
+AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
+
+ifneq ($(CONFIG_ARCH_EBSA110),y)
+ obj-y += io.o
+endif
+
+head-y := head.o
+obj-$(CONFIG_DEBUG_LL) += debug.o
+
+extra-y := $(head-y) init_task.o vmlinux.lds
+
+# Spell out some dependencies that aren't automatically figured out
+$(obj)/entry-armv.o: $(obj)/entry-header.S include/asm-arm/constants.h
+$(obj)/entry-common.o: $(obj)/entry-header.S include/asm-arm/constants.h \
+ $(obj)/calls.S
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
new file mode 100644
index 00000000000..b0bbd1e62eb
--- /dev/null
+++ b/arch/arm/kernel/apm.c
@@ -0,0 +1,610 @@
+/*
+ * bios-less APM driver for ARM Linux
+ * Jamey Hicks <jamey@crl.dec.com>
+ * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ * Intel Corporation, Microsoft Corporation. Advanced Power Management
+ * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * [This document is available from Microsoft at:
+ * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/sched.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+
+#include <asm/apm.h> /* apm_power_info */
+#include <asm/system.h>
+
+/*
+ * The apm_bios device is one of the misc char devices.
+ * This is its minor number.
+ */
+#define APM_MINOR_DEV 134
+
+/*
+ * See Documentation/Config.help for the configuration options.
+ *
+ * Various options can be changed at boot time as follows:
+ * (We allow underscores for compatibility with the modules code)
+ * apm=on/off enable/disable APM
+ */
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS 16
+
+struct apm_queue {
+ unsigned int event_head;
+ unsigned int event_tail;
+ apm_event_t events[APM_MAX_EVENTS];
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+ struct list_head list;
+
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+
+ int suspend_result;
+ unsigned int suspend_state;
+#define SUSPEND_NONE 0 /* no suspend pending */
+#define SUSPEND_PENDING 1 /* suspend pending read */
+#define SUSPEND_READ 2 /* suspend read, pending ack */
+#define SUSPEND_ACKED 3 /* suspend acked */
+#define SUSPEND_DONE 4 /* suspend completed */
+
+ struct apm_queue queue;
+};
+
+/*
+ * Local variables
+ */
+static int suspends_pending;
+static int apm_disabled;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info. kapmd provides us with a process context in which
+ * to handle "APM" events - necessary in particular if we're
+ * going to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DECLARE_COMPLETION(kapmd_exit);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+
+static const char driver_version[] = "1.13"; /* no spaces */
+
+
+
+/*
+ * Compatibility cruft until the IPAQ people move over to the new
+ * interface.
+ */
+static void __apm_get_power_status(struct apm_power_info *info)
+{
+}
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+EXPORT_SYMBOL(apm_get_power_status);
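Machine support code replaces this hook by assigning its own callback to the
exported function pointer. A minimal sketch of what that looks like on the
platform side - the machine name and the reported values here are hypothetical:

	/* Hypothetical platform code overriding the power-status hook. */
	#include <linux/init.h>
	#include <asm/apm.h>

	static void mymach_get_power_status(struct apm_power_info *info)
	{
		info->ac_line_status = 0x01;	/* on-line */
		info->battery_status = 0x00;	/* high */
		info->battery_flag   = 0x01;	/* "high" flag bit */
		info->battery_life   = 90;	/* 90% charge */
		info->time           = 180;
		info->units          = 0;	/* 0 = minutes */
	}

	static int __init mymach_apm_hook(void)
	{
		apm_get_power_status = mymach_get_power_status;
		return 0;
	}
	__initcall(mymach_apm_hook);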
+
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+ return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+ q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+ if (q->event_head == q->event_tail) {
+ static int notified;
+
+ if (notified++ == 0)
+ printk(KERN_ERR "apm: an event queue overflowed\n");
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ }
+ q->events[q->event_head] = event;
+}
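The queue above is a fixed ring: head and tail advance modulo APM_MAX_EVENTS,
and on overflow the oldest event is dropped by bumping the tail, so the ring
holds at most APM_MAX_EVENTS - 1 events. The same wrap-around arithmetic as a
stand-alone user-space check, with made-up numbers:

	/* Ring-buffer arithmetic as used above, shown with plain ints. */
	#include <stdio.h>

	#define MAX 16

	int main(void)
	{
		unsigned int head = 0, tail = 0, dropped = 0;
		int i;

		for (i = 0; i < 20; i++) {	/* add 20 events to a 16-slot ring */
			head = (head + 1) % MAX;
			if (head == tail) {	/* full: drop the oldest event */
				tail = (tail + 1) % MAX;
				dropped++;
			}
		}
		printf("dropped %u events\n", dropped);	/* prints: dropped 5 */
		return 0;
	}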
+
+static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+{
+ if (as->suser && as->writer) {
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_USER_SUSPEND:
+ /*
+ * If this user already has a suspend pending,
+ * don't queue another one.
+ */
+ if (as->suspend_state != SUSPEND_NONE)
+ return;
+
+ as->suspend_state = SUSPEND_PENDING;
+ suspends_pending++;
+ break;
+ }
+ }
+ queue_add_event(&as->queue, event);
+}
+
+static void queue_event(apm_event_t event, struct apm_user *sender)
+{
+ struct apm_user *as;
+
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ if (as != sender && as->reader)
+ queue_event_one_user(as, event);
+ }
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
+}
+
+static void apm_suspend(void)
+{
+ struct apm_user *as;
+ int err = pm_suspend(PM_SUSPEND_MEM);
+
+ /*
+ * Anyone on the APM queues will think we're still suspended.
+ * Send a message so everyone knows we're now awake again.
+ */
+ queue_event(APM_NORMAL_RESUME, NULL);
+
+ /*
+ * Finally, wake up anyone who is sleeping on the suspend.
+ */
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ as->suspend_result = err;
+ as->suspend_state = SUSPEND_DONE;
+ }
+ up_read(&user_list_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct apm_user *as = fp->private_data;
+ apm_event_t event;
+ int i = count, ret = 0;
+
+ if (count < sizeof(apm_event_t))
+ return -EINVAL;
+
+ if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+ while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+ event = queue_get_event(&as->queue);
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &event, sizeof(event)))
+ break;
+
+ if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+ as->suspend_state = SUSPEND_READ;
+
+ buf += sizeof(event);
+ i -= sizeof(event);
+ }
+
+ if (i < count)
+ ret = count - i;
+
+ return ret;
+}
+
+static unsigned int apm_poll(struct file *fp, poll_table * wait)
+{
+ struct apm_user *as = fp->private_data;
+
+ poll_wait(fp, &apm_waitqueue, wait);
+ return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions. It is used to:
+ * - initiate a suspend
+ * - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
+ */
+static int
+apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+{
+ struct apm_user *as = filp->private_data;
+ unsigned long flags;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+ return -EPERM;
+
+ switch (cmd) {
+ case APM_IOC_SUSPEND:
+ as->suspend_result = -EINTR;
+
+ if (as->suspend_state == SUSPEND_READ) {
+ /*
+ * If we read a suspend command from /dev/apm_bios,
+ * then the corresponding APM_IOC_SUSPEND ioctl is
+ * interpreted as an acknowledge.
+ */
+ as->suspend_state = SUSPEND_ACKED;
+ suspends_pending--;
+ } else {
+ /*
+ * Otherwise it is a request to suspend the system.
+ * Queue an event for all readers, and expect an
+ * acknowledge from all writers who haven't already
+ * acknowledged.
+ */
+ queue_event(APM_USER_SUSPEND, as);
+ }
+
+ /*
+ * If there are no further acknowledges required, suspend
+ * the system.
+ */
+ if (suspends_pending == 0)
+ apm_suspend();
+
+ /*
+ * Wait for the suspend/resume to complete. If there are
+ * pending acknowledges, we wait here for them.
+ *
+ * Note that we need to ensure that the PM subsystem does
+ * not kick us out of the wait when it suspends the threads.
+ */
+ flags = current->flags;
+ current->flags |= PF_NOFREEZE;
+
+ /*
+ * Note: do not allow a thread which is acking the suspend
+ * to escape until the resume is complete.
+ */
+ if (as->suspend_state == SUSPEND_ACKED)
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+ else
+ wait_event_interruptible(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+
+ current->flags = flags;
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+ break;
+ }
+
+ return err;
+}
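From user space the protocol is: a privileged writer that reads
APM_USER_SUSPEND from /dev/apm_bios must issue APM_IOC_SUSPEND to acknowledge,
while a process that wants to trigger a suspend issues the same ioctl
unprompted. A user-space sketch of the latter case (error handling trimmed):

	/* User-space sketch: request a suspend via /dev/apm_bios.
	 * Needs write mode and CAP_SYS_ADMIN, per apm_ioctl() above. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/apm_bios.h>

	int main(void)
	{
		int fd = open("/dev/apm_bios", O_RDWR);
		if (fd < 0)
			return 1;
		/* Blocks until the resume completes (or a signal arrives). */
		if (ioctl(fd, APM_IOC_SUSPEND) < 0)
			return 1;
		close(fd);
		return 0;
	}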
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as = filp->private_data;
+ filp->private_data = NULL;
+
+ down_write(&user_list_lock);
+ list_del(&as->list);
+ up_write(&user_list_lock);
+
+ /*
+ * We are now unhooked from the chain. As far as new
+ * events are concerned, we no longer exist. However, we
+ * need to balance suspends_pending, which means the
+ * possibility of sleeping.
+ */
+ if (as->suspend_state != SUSPEND_NONE) {
+ suspends_pending -= 1;
+ if (suspends_pending == 0)
+ apm_suspend();
+ }
+
+ kfree(as);
+ return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as;
+
+	as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (as) {
+ memset(as, 0, sizeof(*as));
+
+ /*
+ * XXX - this is a tiny bit broken, when we consider BSD
+ * process accounting. If the device is opened by root, we
+ * instantly flag that we used superuser privs. Who knows,
+ * we might close the device immediately without doing a
+ * privileged operation -- cevans
+ */
+ as->suser = capable(CAP_SYS_ADMIN);
+ as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+ as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+ down_write(&user_list_lock);
+ list_add(&as->list, &apm_user_list);
+ up_write(&user_list_lock);
+
+ filp->private_data = as;
+ }
+
+ return as ? 0 : -ENOMEM;
+}
+
+static struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+ .ioctl = apm_ioctl,
+ .open = apm_open,
+ .release = apm_release,
+};
+
+static struct miscdevice apm_device = {
+ .minor = APM_MINOR_DEV,
+ .name = "apm_bios",
+ .fops = &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ * 0) Linux driver version (this will change if format changes)
+ * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
+ * 2) APM flags from APM Installation Check (0x00):
+ * bit 0: APM_16_BIT_SUPPORT
+ * bit 1: APM_32_BIT_SUPPORT
+ * bit 2: APM_IDLE_SLOWS_CLOCK
+ * bit 3: APM_BIOS_DISABLED
+ * bit 4: APM_BIOS_DISENGAGED
+ * 3) AC line status
+ * 0x00: Off-line
+ * 0x01: On-line
+ * 0x02: On backup power (BIOS >= 1.1 only)
+ * 0xff: Unknown
+ * 4) Battery status
+ * 0x00: High
+ * 0x01: Low
+ * 0x02: Critical
+ * 0x03: Charging
+ * 0x04: Selected battery not present (BIOS >= 1.2 only)
+ * 0xff: Unknown
+ * 5) Battery flag
+ * bit 0: High
+ * bit 1: Low
+ * bit 2: Critical
+ * bit 3: Charging
+ * bit 7: No system battery
+ * 0xff: Unknown
+ * 6) Remaining battery life (percentage of charge):
+ * 0-100: valid
+ * -1: Unknown
+ * 7) Remaining battery life (time units):
+ * Number of remaining minutes or seconds
+ * -1: Unknown
+ * 8) min = minutes; sec = seconds
+ */
+static int apm_get_info(char *buf, char **start, off_t fpos, int length)
+{
+ struct apm_power_info info;
+ char *units;
+ int ret;
+
+ info.ac_line_status = 0xff;
+ info.battery_status = 0xff;
+ info.battery_flag = 0xff;
+ info.battery_life = -1;
+ info.time = -1;
+ info.units = -1;
+
+ if (apm_get_power_status)
+ apm_get_power_status(&info);
+
+ switch (info.units) {
+ default: units = "?"; break;
+ case 0: units = "min"; break;
+ case 1: units = "sec"; break;
+ }
+
+ ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+ driver_version, APM_32_BIT_SUPPORT,
+ info.ac_line_status, info.battery_status,
+ info.battery_flag, info.battery_life,
+ info.time, units);
+
+ return ret;
+}
+#endif
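Given the defaults above, a machine with no callback reports all-unknown
values; with a populated apm_power_info the line follows the sprintf() format
in apm_get_info(). An illustrative sample (values made up): driver 1.13,
APM 1.2, 32-bit support flag, AC on-line, battery high, 75% charge,
180 minutes remaining:

	1.13 1.2 0x02 0x01 0x00 0x01 75% 180 min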
+
+static int kapmd(void *arg)
+{
+ daemonize("kapmd");
+ current->flags |= PF_NOFREEZE;
+
+ do {
+ apm_event_t event;
+
+ wait_event_interruptible(kapmd_wait,
+ !queue_empty(&kapmd_queue) || !pm_active);
+
+ if (!pm_active)
+ break;
+
+ spin_lock_irq(&kapmd_queue_lock);
+ event = 0;
+ if (!queue_empty(&kapmd_queue))
+ event = queue_get_event(&kapmd_queue);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ switch (event) {
+ case 0:
+ break;
+
+ case APM_LOW_BATTERY:
+ case APM_POWER_STATUS_CHANGE:
+ queue_event(event, NULL);
+ break;
+
+ case APM_USER_SUSPEND:
+ case APM_SYS_SUSPEND:
+ queue_event(event, NULL);
+ if (suspends_pending == 0)
+ apm_suspend();
+ break;
+
+ case APM_CRITICAL_SUSPEND:
+ apm_suspend();
+ break;
+ }
+ } while (1);
+
+ complete_and_exit(&kapmd_exit, 0);
+}
+
+static int __init apm_init(void)
+{
+ int ret;
+
+ if (apm_disabled) {
+ printk(KERN_NOTICE "apm: disabled on user request.\n");
+ return -ENODEV;
+ }
+
+ if (PM_IS_ACTIVE()) {
+ printk(KERN_NOTICE "apm: overridden by ACPI.\n");
+ return -EINVAL;
+ }
+
+ pm_active = 1;
+
+ ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
+ if (ret < 0) {
+ pm_active = 0;
+ return ret;
+ }
+
+#ifdef CONFIG_PROC_FS
+ create_proc_info_entry("apm", 0, NULL, apm_get_info);
+#endif
+
+ ret = misc_register(&apm_device);
+ if (ret != 0) {
+ remove_proc_entry("apm", NULL);
+
+ pm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+ }
+
+ return ret;
+}
+
+static void __exit apm_exit(void)
+{
+ misc_deregister(&apm_device);
+ remove_proc_entry("apm", NULL);
+
+ pm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
+
+#ifndef MODULE
+static int __init apm_setup(char *str)
+{
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ apm_disabled = 1;
+ if (strncmp(str, "on", 2) == 0)
+ apm_disabled = 0;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+ return 1;
+}
+
+__setup("apm=", apm_setup);
+#endif
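The parser accepts comma-separated tokens, so "apm=off" on the kernel command
line sets apm_disabled before apm_init() runs. An illustrative boot line:

	console=ttyS0,115200 root=/dev/sda1 apm=off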
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action. Only a subset of events are handled:
+ * %APM_LOW_BATTERY
+ * %APM_POWER_STATUS_CHANGE
+ * %APM_USER_SUSPEND
+ * %APM_SYS_SUSPEND
+ * %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kapmd_queue_lock, flags);
+ queue_add_event(&kapmd_queue, event);
+ spin_unlock_irqrestore(&kapmd_queue_lock, flags);
+
+ wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
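Machine support code is expected to call apm_queue_event() from interrupt
context: it only takes a spinlock and wakes kapmd, which then does the actual
work (including any suspend) from process context. A hypothetical example -
the handler and its event source are made up:

	/* Hypothetical platform interrupt handler feeding kapmd. */
	#include <linux/interrupt.h>
	#include <asm/apm.h>

	static irqreturn_t mymach_battery_irq(int irq, void *dev_id,
					      struct pt_regs *regs)
	{
		/* Safe from IRQ context: queuing is spinlock-protected
		 * and the heavy lifting happens in kapmd. */
		apm_queue_event(APM_LOW_BATTERY);
		return IRQ_HANDLED;
	}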
diff --git a/arch/arm/kernel/arch.c b/arch/arm/kernel/arch.c
new file mode 100644
index 00000000000..4e02fbeb10a
--- /dev/null
+++ b/arch/arm/kernel/arch.c
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/arm/kernel/arch.c
+ *
+ * Architecture specific fixups.
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/elf.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/mach/arch.h>
+
+unsigned int vram_size;
+
+#ifdef CONFIG_ARCH_ACORN
+
+unsigned int memc_ctrl_reg;
+unsigned int number_mfm_drives;
+
+static int __init parse_tag_acorn(const struct tag *tag)
+{
+ memc_ctrl_reg = tag->u.acorn.memc_control_reg;
+ number_mfm_drives = tag->u.acorn.adfsdrives;
+
+	switch (tag->u.acorn.vram_pages) {
+	case 512:
+		vram_size += PAGE_SIZE * 256;
+		/* fall through: 512 pages = two 256-page blocks */
+	case 256:
+		vram_size += PAGE_SIZE * 256;
+	default:
+		break;
+	}
+#if 0
+ if (vram_size) {
+ desc->video_start = 0x02000000;
+ desc->video_end = 0x02000000 + vram_size;
+ }
+#endif
+ return 0;
+}
+
+__tagtable(ATAG_ACORN, parse_tag_acorn);
+
+#endif
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 00000000000..4c38bd8bc29
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,175 @@
+/*
+ * linux/arch/arm/kernel/armksyms.c
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivdi3(void);
+extern void __umoddi3(void);
+extern void __udivmoddi4(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __do_div64(void);
+
+extern void fpundefinstr(void);
+extern void fp_enter(void);
+
+/*
+ * This has a special calling convention; it doesn't
+ * modify any of the usual registers, except for LR.
+ */
+#define EXPORT_SYMBOL_ALIAS(sym,orig) \
+ const struct kernel_symbol __ksymtab_##sym \
+ __attribute__((section("__ksymtab"))) = \
+ { (unsigned long)&orig, #sym };
+
+/*
+ * floating point math emulator support.
+ * These symbols will never change their calling convention...
+ */
+EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
+EXPORT_SYMBOL_ALIAS(fp_printk,printk);
+EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
+
+EXPORT_SYMBOL(__backtrace);
+
+ /* platform dependent support */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__const_udelay);
+
+ /* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+ /* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(__memzero);
+
+ /* user mem (segment) */
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_strnlen_user);
+EXPORT_SYMBOL(__arch_strncpy_from_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+ /* gcc lib functions */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__udivmoddi4);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__do_div64);
+
+ /* bitops */
+EXPORT_SYMBOL(_set_bit_le);
+EXPORT_SYMBOL(_test_and_set_bit_le);
+EXPORT_SYMBOL(_clear_bit_le);
+EXPORT_SYMBOL(_test_and_clear_bit_le);
+EXPORT_SYMBOL(_change_bit_le);
+EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+EXPORT_SYMBOL(_find_first_bit_le);
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(_set_bit_be);
+EXPORT_SYMBOL(_test_and_set_bit_be);
+EXPORT_SYMBOL(_clear_bit_be);
+EXPORT_SYMBOL(_test_and_clear_bit_be);
+EXPORT_SYMBOL(_change_bit_be);
+EXPORT_SYMBOL(_test_and_change_bit_be);
+EXPORT_SYMBOL(_find_first_zero_bit_be);
+EXPORT_SYMBOL(_find_next_zero_bit_be);
+EXPORT_SYMBOL(_find_first_bit_be);
+EXPORT_SYMBOL(_find_next_bit_be);
+#endif
+
+ /* syscalls */
+EXPORT_SYMBOL(sys_write);
+EXPORT_SYMBOL(sys_read);
+EXPORT_SYMBOL(sys_lseek);
+EXPORT_SYMBOL(sys_open);
+EXPORT_SYMBOL(sys_exit);
+EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
new file mode 100644
index 00000000000..a418dad6692
--- /dev/null
+++ b/arch/arm/kernel/arthur.c
@@ -0,0 +1,91 @@
+/*
+ * linux/arch/arm/kernel/arthur.c
+ *
+ * Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell
+ *
+ * Arthur personality
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/stddef.h>
+#include <linux/signal.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+
+/* Arthur doesn't have many signals, and a lot of those that it does
+ have don't map easily to any Linux equivalent. Never mind. */
+
+#define ARTHUR_SIGABRT 1
+#define ARTHUR_SIGFPE 2
+#define ARTHUR_SIGILL 3
+#define ARTHUR_SIGINT 4
+#define ARTHUR_SIGSEGV 5
+#define ARTHUR_SIGTERM 6
+#define ARTHUR_SIGSTAK 7
+#define ARTHUR_SIGUSR1 8
+#define ARTHUR_SIGUSR2 9
+#define ARTHUR_SIGOSERROR 10
+
+static unsigned long arthur_to_linux_signals[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static unsigned long linux_to_arthur_signals[32] = {
+ 0, -1, ARTHUR_SIGINT, -1,
+ ARTHUR_SIGILL, 5, ARTHUR_SIGABRT, 7,
+ ARTHUR_SIGFPE, 9, ARTHUR_SIGUSR1, ARTHUR_SIGSEGV,
+ ARTHUR_SIGUSR2, 13, 14, ARTHUR_SIGTERM,
+ 16, 17, 18, 19,
+ 20, 21, 22, 23,
+ 24, 25, 26, 27,
+ 28, 29, 30, 31
+};
+
+static void arthur_lcall7(int nr, struct pt_regs *regs)
+{
+ struct siginfo info;
+ info.si_signo = SIGSWI;
+ info.si_errno = nr;
+ /* Bounce it to the emulator */
+ send_sig_info(SIGSWI, &info, current);
+}
+
+static struct exec_domain arthur_exec_domain = {
+ .name = "Arthur",
+ .handler = arthur_lcall7,
+ .pers_low = PER_RISCOS,
+ .pers_high = PER_RISCOS,
+ .signal_map = arthur_to_linux_signals,
+ .signal_invmap = linux_to_arthur_signals,
+ .module = THIS_MODULE,
+};
+
+/*
+ * We could do with some locking to stop Arthur being removed while
+ * processes are using it.
+ */
+
+static int __init arthur_init(void)
+{
+ return register_exec_domain(&arthur_exec_domain);
+}
+
+static void __exit arthur_exit(void)
+{
+ unregister_exec_domain(&arthur_exec_domain);
+}
+
+module_init(arthur_init);
+module_exit(arthur_exit);
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
new file mode 100644
index 00000000000..99d43259ff8
--- /dev/null
+++ b/arch/arm/kernel/asm-offsets.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 1995-2003 Russell King
+ * 2001-2002 Keith Owens
+ *
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/mach/arch.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+
+/*
+ * Make sure that the compiler and target are compatible.
+ */
+#if defined(__APCS_26__)
+#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
+#endif
+/*
+ * GCC 2.95.1, 2.95.2: ignores register clobber list in asm().
+ * GCC 3.0, 3.1: general bad code generation.
+ * GCC 3.2.0: incorrect function argument offset calculation.
+ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
+ * (http://gcc.gnu.org/PR8896) and incorrect structure
+ * initialisation in fs/jffs2/erase.c
+ */
+#if __GNUC__ < 2 || \
+ (__GNUC__ == 2 && __GNUC_MINOR__ < 95) || \
+ (__GNUC__ == 2 && __GNUC_MINOR__ == 95 && __GNUC_PATCHLEVEL__ != 0 && \
+ __GNUC_PATCHLEVEL__ < 3) || \
+ (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#error Your compiler is too buggy; it is known to miscompile kernels.
+#error Known good compilers: 2.95.3, 2.95.4, 2.96, 3.3
+#endif
+
+/* Use marker if you need to separate the values later */
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
+ DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
+ DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
+ DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
+ DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
+ DEFINE(TI_IWMMXT_STATE, (offsetof(struct thread_info, fpstate)+4)&~7);
+ BLANK();
+#if __LINUX_ARM_ARCH__ >= 6
+ DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
+ BLANK();
+#endif
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+ DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
+ BLANK();
+ DEFINE(VM_EXEC, VM_EXEC);
+ BLANK();
+ DEFINE(PAGE_SZ, PAGE_SIZE);
+ DEFINE(VIRT_OFFSET, PAGE_OFFSET);
+ BLANK();
+ DEFINE(SYS_ERROR0, 0x9f0000);
+ BLANK();
+ DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc));
+ return 0;
+}
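Nothing in main() executes at run time: the build compiles this file to
assembly, and each DEFINE() plants a "->" marker line in the .s output that
the kbuild sed machinery turns into a preprocessor constant (on ARM the
result ends up in include/asm-arm/constants.h, the dependency spelled out in
the Makefile above). Roughly, with an illustrative offset:

	DEFINE(TI_TASK, offsetof(struct thread_info, task));

	/* emits a marker line in the generated assembly, resembling: */
	->TI_TASK #12 offsetof(struct thread_info, task)

	/* which the build post-processes into: */
	#define TI_TASK 12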
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
new file mode 100644
index 00000000000..ad26e98f1e6
--- /dev/null
+++ b/arch/arm/kernel/bios32.c
@@ -0,0 +1,699 @@
+/*
+ * linux/arch/arm/kernel/bios32.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * Bits taken from various places.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/pci.h>
+
+static int debug_pci;
+static int use_firmware;
+
+/*
+ * We can't use pci_find_device() here since we are
+ * called from interrupt context.
+ */
+static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 status;
+
+ /*
+ * ignore host bridge - we handle
+ * that separately
+ */
+ if (dev->bus->number == 0 && dev->devfn == 0)
+ continue;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status == 0xffff)
+ continue;
+
+ if ((status & status_mask) == 0)
+ continue;
+
+ /* clear the status errors */
+ pci_write_config_word(dev, PCI_STATUS, status & status_mask);
+
+ if (warn)
+ printk("(%s: %04X) ", pci_name(dev), status);
+ }
+
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (dev->subordinate)
+ pcibios_bus_report_status(dev->subordinate, status_mask, warn);
+}
+
+void pcibios_report_status(u_int status_mask, int warn)
+{
+ struct list_head *l;
+
+ list_for_each(l, &pci_root_buses) {
+ struct pci_bus *bus = pci_bus_b(l);
+
+ pcibios_bus_report_status(bus, status_mask, warn);
+ }
+}
+
+/*
+ * We don't use this hook to fix the device itself, but rather its
+ * initialisation. That's not what the hook is meant for, but it works.
+ * Note that the arbiter/ISA bridge appears to be buggy, specifically in
+ * the following area:
+ * 1. park on CPU
+ * 2. ISA bridge ping-pong
+ * 3. ISA bridge master handling of target RETRY
+ *
+ * Bug 3 is responsible for the sound DMA grinding to a halt. We now
+ * live with bug 2.
+ */
+static void __devinit pci_fixup_83c553(struct pci_dev *dev)
+{
+ /*
+ * Set memory region to start at address 0, and enable IO
+ */
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);
+
+ dev->resource[0].end -= dev->resource[0].start;
+ dev->resource[0].start = 0;
+
+ /*
+ * All memory requests from ISA to be channelled to PCI
+ */
+ pci_write_config_byte(dev, 0x48, 0xff);
+
+ /*
+ * Enable ping-pong on bus master to ISA bridge transactions.
+ * This improves the sound DMA substantially. The fixed
+ * priority arbiter also helps (see below).
+ */
+ pci_write_config_byte(dev, 0x42, 0x01);
+
+ /*
+ * Enable PCI retry
+ */
+ pci_write_config_byte(dev, 0x40, 0x22);
+
+ /*
+ * We used to set the arbiter to "park on last master" (bit
+ * 1 set), but unfortunately the CyberPro does not park the
+ * bus. We must therefore park on CPU. Unfortunately, this
+ * may trigger yet another bug in the 553.
+ */
+ pci_write_config_byte(dev, 0x83, 0x02);
+
+ /*
+ * Make the ISA DMA request lowest priority, and disable
+ * rotating priorities completely.
+ */
+ pci_write_config_byte(dev, 0x80, 0x11);
+ pci_write_config_byte(dev, 0x81, 0x00);
+
+ /*
+ * Route INTA input to IRQ 11, and set IRQ11 to be level
+ * sensitive.
+ */
+ pci_write_config_word(dev, 0x44, 0xb000);
+ outb(0x08, 0x4d1);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
+
+static void __devinit pci_fixup_unassign(struct pci_dev *dev)
+{
+ dev->resource[0].end -= dev->resource[0].start;
+ dev->resource[0].start = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);
+
+/*
+ * Prevent the PCI layer from seeing the resources allocated to this device
+ * if it is the host bridge by marking it as such. These resources are of
+ * no consequence to the PCI layer (they are handled elsewhere).
+ */
+static void __devinit pci_fixup_dec21285(struct pci_dev *dev)
+{
+ int i;
+
+ if (dev->devfn == 0) {
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
+
+/*
+ * Same as above. The PrPMC800 carrier board for the PrPMC1100
+ * card maps the host-bridge @ 00:01:00 for some reason and it
+ * ends up getting scanned. Note that we only want to do this
+ * fixup when we find the IXP4xx on a PrPMC system, which is why
+ * we check the machine type. We could be running on a board
+ * with an IXP4xx target device and we don't want to kill the
+ * resources in that case.
+ */
+static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev)
+{
+ int i;
+
+ if (machine_is_prpmc1100()) {
+ dev->class &= 0xff;
+ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100);
+
+/*
+ * PCI IDE controllers use non-standard I/O port decoding, respect it.
+ */
+static void __devinit pci_fixup_ide_bases(struct pci_dev *dev)
+{
+ struct resource *r;
+ int i;
+
+ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ return;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ r = dev->resource + i;
+ if ((r->start & ~0x80) == 0x374) {
+ r->start |= 2;
+ r->end = r->start;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
+
+/*
+ * Put the DEC21142 to sleep
+ */
+static void __devinit pci_fixup_dec21142(struct pci_dev *dev)
+{
+ pci_write_config_dword(dev, 0x40, 0x80000000);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);
+
+/*
+ * The CY82C693 needs some rather major fixups to ensure that it does
+ * the right thing. Idea from the Alpha people, with a few additions.
+ *
+ * We ensure that the IDE base registers are set to 1f0/3f4 for the
+ * primary bus, and 170/374 for the secondary bus. Also, hide them
+ * from the PCI subsystem view as well so we won't try to perform
+ * our own auto-configuration on them.
+ *
+ * In addition, we ensure that the PCI IDE interrupts are routed to
+ * IRQ 14 and IRQ 15 respectively.
+ *
+ * The above gets us to a point where the IDE on this device is
+ * functional. However, the CY82C693U _does not work_ in bus
+ * master mode without locking the PCI bus solid.
+ */
+static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+ u32 base0, base1;
+
+ if (dev->class & 0x80) { /* primary */
+ base0 = 0x1f0;
+ base1 = 0x3f4;
+ } else { /* secondary */
+ base0 = 0x170;
+ base1 = 0x374;
+ }
+
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
+ base0 | PCI_BASE_ADDRESS_SPACE_IO);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
+ base1 | PCI_BASE_ADDRESS_SPACE_IO);
+
+ dev->resource[0].start = 0;
+ dev->resource[0].end = 0;
+ dev->resource[0].flags = 0;
+
+ dev->resource[1].start = 0;
+ dev->resource[1].end = 0;
+ dev->resource[1].flags = 0;
+ } else if (PCI_FUNC(dev->devfn) == 0) {
+ /*
+ * Setup IDE IRQ routing.
+ */
+ pci_write_config_byte(dev, 0x4b, 14);
+ pci_write_config_byte(dev, 0x4c, 15);
+
+ /*
+ * Disable FREQACK handshake, enable USB.
+ */
+ pci_write_config_byte(dev, 0x4d, 0x41);
+
+ /*
+ * Enable PCI retry, and PCI post-write buffer.
+ */
+ pci_write_config_byte(dev, 0x44, 0x17);
+
+ /*
+ * Enable ISA master and DMA post write buffering.
+ */
+ pci_write_config_byte(dev, 0x45, 0x03);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ if (debug_pci)
+ printk("PCI: Assigning IRQ %02d to %s\n", irq, pci_name(dev));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * If the bus contains any of these devices, then we must not turn on
+ * parity checking of any kind. Currently this is CyberPro 20x0 only.
+ */
+static inline int pdev_bad_for_parity(struct pci_dev *dev)
+{
+ return (dev->vendor == PCI_VENDOR_ID_INTERG &&
+ (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
+ dev->device == PCI_DEVICE_ID_INTERG_2010));
+}
+
+/*
+ * Adjust the device resources from bus-centric to Linux-centric.
+ */
+static void __devinit
+pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
+{
+ unsigned long offset;
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (dev->resource[i].start == 0)
+ continue;
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ offset = root->mem_offset;
+ else
+ offset = root->io_offset;
+
+ dev->resource[i].start += offset;
+ dev->resource[i].end += offset;
+ }
+}
+
+static void __devinit
+pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
+{
+ struct pci_dev *dev = bus->self;
+ int i;
+
+ if (!dev) {
+ /*
+ * Assign root bus resources.
+ */
+ for (i = 0; i < 3; i++)
+ bus->resource[i] = root->resource[i];
+ }
+}
+
+/*
+ * pcibios_fixup_bus - Called after each bus is probed,
+ * but before its children are examined.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_sys_data *root = bus->sysdata;
+ struct pci_dev *dev;
+ u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;
+
+ pbus_assign_bus_resources(bus, root);
+
+ /*
+ * Walk the devices on this bus, working out what we can
+ * and can't support.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 status;
+
+ pdev_fixup_device_resources(root, dev);
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+
+ /*
+ * If any device on this bus does not support fast back
+ * to back transfers, then the bus as a whole is not able
+ * to support them. Having fast back to back transfers
+ * on saves us one PCI cycle per transaction.
+ */
+ if (!(status & PCI_STATUS_FAST_BACK))
+ features &= ~PCI_COMMAND_FAST_BACK;
+
+ if (pdev_bad_for_parity(dev))
+ features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+ switch (dev->class >> 8) {
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+ case PCI_CLASS_BRIDGE_ISA:
+ case PCI_CLASS_BRIDGE_EISA:
+ /*
+ * If this device is an ISA bridge, set isa_bridge
+ * to point at this device. We will then go looking
+ * for things like keyboard, etc.
+ */
+ isa_bridge = dev;
+ break;
+#endif
+ case PCI_CLASS_BRIDGE_PCI:
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
+ status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
+ status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
+ break;
+
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
+ status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
+ pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
+ break;
+ }
+ }
+
+ /*
+ * Now walk the devices again, this time setting them up.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd |= features;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+ L1_CACHE_BYTES >> 2);
+ }
+
+ /*
+ * Propagate the flags to the PCI bridge.
+ */
+ if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ if (features & PCI_COMMAND_FAST_BACK)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
+ if (features & PCI_COMMAND_PARITY)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
+ }
+
+ /*
+ * Report what we did for this bus
+ */
+ printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
+ bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
+}
+
+/*
+ * Convert from Linux-centric to bus-centric addresses for bridge devices.
+ */
+void __devinit
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct pci_sys_data *root = dev->sysdata;
+ unsigned long offset = 0;
+
+ if (res->flags & IORESOURCE_IO)
+ offset = root->io_offset;
+ if (res->flags & IORESOURCE_MEM)
+ offset = root->mem_offset;
+
+ region->start = res->start - offset;
+ region->end = res->end - offset;
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+#endif
+
+/*
+ * This is the standard PCI-PCI bridge swizzling algorithm:
+ *
+ * Dev: 0 1 2 3
+ * A A B C D
+ * B B C D A
+ * C C D A B
+ * D D A B C
+ * ^^^^^^^^^^ irq pin on bridge
+ */
+u8 __devinit pci_std_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ int pin = *pinp - 1;
+
+ while (dev->bus->self) {
+ pin = (pin + PCI_SLOT(dev->devfn)) & 3;
+ /*
+ * move up the chain of bridges,
+ * swizzling as we go.
+ */
+ dev = dev->bus->self;
+ }
+ *pinp = pin + 1;
+
+ return PCI_SLOT(dev->devfn);
+}
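Worked example of the table above: a device in slot 2 raising INTB (pin 2)
behind a single bridge gives pin - 1 = 1, then (1 + 2) & 3 = 3, so the parent
bridge sees pin D; the returned value is the slot of the device sitting
directly on the root bus. The per-bridge step in isolation:

	/* One swizzle step; pins are 1..4 (INTA..INTD). */
	static inline int swizzle_step(int pin, int slot)
	{
		return ((pin - 1 + slot) & 3) + 1;	/* pin B, slot 2 -> pin D */
	}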
+
+/*
+ * Swizzle the device pin each time we cross a bridge.
+ * This may update *pin; it returns the slot number.
+ */
+static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
+{
+ struct pci_sys_data *sys = dev->sysdata;
+ int slot = 0, oldpin = *pin;
+
+ if (sys->swizzle)
+ slot = sys->swizzle(dev, pin);
+
+ if (debug_pci)
+ printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
+ pci_name(dev), oldpin, *pin, slot);
+
+ return slot;
+}
+
+/*
+ * Map a slot/pin to an IRQ.
+ */
+static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->sysdata;
+ int irq = -1;
+
+ if (sys->map_irq)
+ irq = sys->map_irq(dev, slot, pin);
+
+ if (debug_pci)
+ printk("PCI: %s mapping slot %d pin %d => irq %d\n",
+ pci_name(dev), slot, pin, irq);
+
+ return irq;
+}
+
+static void __init pcibios_init_hw(struct hw_pci *hw)
+{
+ struct pci_sys_data *sys = NULL;
+ int ret;
+ int nr, busnr;
+
+ for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
+ sys = kmalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
+ if (!sys)
+ panic("PCI: unable to allocate sys data!");
+
+ memset(sys, 0, sizeof(struct pci_sys_data));
+
+ sys->hw = hw;
+ sys->busnr = busnr;
+ sys->swizzle = hw->swizzle;
+ sys->map_irq = hw->map_irq;
+ sys->resource[0] = &ioport_resource;
+ sys->resource[1] = &iomem_resource;
+
+ ret = hw->setup(nr, sys);
+
+ if (ret > 0) {
+ sys->bus = hw->scan(nr, sys);
+
+ if (!sys->bus)
+ panic("PCI: unable to scan bus!");
+
+ busnr = sys->bus->subordinate + 1;
+
+ list_add(&sys->node, &hw->buses);
+ } else {
+ kfree(sys);
+ if (ret < 0)
+ break;
+ }
+ }
+}
+
+void __init pci_common_init(struct hw_pci *hw)
+{
+ struct pci_sys_data *sys;
+
+ INIT_LIST_HEAD(&hw->buses);
+
+ if (hw->preinit)
+ hw->preinit();
+ pcibios_init_hw(hw);
+ if (hw->postinit)
+ hw->postinit();
+
+ pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
+
+ list_for_each_entry(sys, &hw->buses, node) {
+ struct pci_bus *bus = sys->bus;
+
+ if (!use_firmware) {
+ /*
+ * Size the bridge windows.
+ */
+ pci_bus_size_bridges(bus);
+
+ /*
+ * Assign resources.
+ */
+ pci_bus_assign_resources(bus);
+ }
+
+ /*
+ * Tell drivers about devices found.
+ */
+ pci_bus_add_devices(bus);
+ }
+}
+
+char * __init pcibios_setup(char *str)
+{
+ if (!strcmp(str, "debug")) {
+ debug_pci = 1;
+ return NULL;
+ } else if (!strcmp(str, "firmware")) {
+ use_firmware = 1;
+ return NULL;
+ }
+ return str;
+}
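These strings come from the kernel command line: the generic PCI code splits
the pci= option on commas and hands each token it does not itself consume to
pcibios_setup(). So "pci=debug" turns on the swizzle/IRQ printks above, and
"pci=firmware" makes pci_common_init() keep the boot firmware's resource
assignments. Illustrative boot parameter:

	pci=debug,firmware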
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff...
+ */
+void pcibios_align_resource(void *data, struct resource *res,
+ unsigned long size, unsigned long align)
+{
+ unsigned long start = res->start;
+
+ if (res->flags & IORESOURCE_IO && start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+
+ res->start = (start + align - 1) & ~(align - 1);
+}
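Worked example: an I/O request starting at 0x2904 has one of bits 0x300 set,
so it is first rounded up with (0x2904 + 0x3ff) & ~0x3ff = 0x2c00 - landing
back in the safe 0x000-0x0ff region modulo 0x400 - before the usual
power-of-two alignment is applied.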
+
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx = 0; idx < 6; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1 << idx)))
+ continue;
+
+ r = dev->resource + idx;
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because"
+ " of resource collisions\n", pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ /*
+ * Bridges (eg, cardbus bridges) need to be fully enabled
+ */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+ if (cmd != old_cmd) {
+ printk("PCI: enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ struct pci_sys_data *root = dev->sysdata;
+ unsigned long phys;
+
+ if (mmap_state == pci_mmap_io) {
+ return -EINVAL;
+ } else {
+ phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
+ }
+
+ /*
+ * Mark this as IO
+ */
+ vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, phys,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
new file mode 100644
index 00000000000..e5d370c235d
--- /dev/null
+++ b/arch/arm/kernel/calls.S
@@ -0,0 +1,335 @@
+/*
+ * linux/arch/arm/kernel/calls.S
+ *
+ * Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is included twice in entry-common.S
+ */
+#ifndef NR_syscalls
+#define NR_syscalls 320
+#else
+
+__syscall_start:
+/* 0 */ .long sys_restart_syscall
+ .long sys_exit
+ .long sys_fork_wrapper
+ .long sys_read
+ .long sys_write
+/* 5 */ .long sys_open
+ .long sys_close
+ .long sys_ni_syscall /* was sys_waitpid */
+ .long sys_creat
+ .long sys_link
+/* 10 */ .long sys_unlink
+ .long sys_execve_wrapper
+ .long sys_chdir
+ .long sys_time /* used by libc4 */
+ .long sys_mknod
+/* 15 */ .long sys_chmod
+ .long sys_lchown16
+ .long sys_ni_syscall /* was sys_break */
+ .long sys_ni_syscall /* was sys_stat */
+ .long sys_lseek
+/* 20 */ .long sys_getpid
+ .long sys_mount
+ .long sys_oldumount /* used by libc4 */
+ .long sys_setuid16
+ .long sys_getuid16
+/* 25 */ .long sys_stime
+ .long sys_ptrace
+ .long sys_alarm /* used by libc4 */
+ .long sys_ni_syscall /* was sys_fstat */
+ .long sys_pause
+/* 30 */ .long sys_utime /* used by libc4 */
+ .long sys_ni_syscall /* was sys_stty */
+ .long sys_ni_syscall /* was sys_getty */
+ .long sys_access
+ .long sys_nice
+/* 35 */ .long sys_ni_syscall /* was sys_ftime */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+/* 40 */ .long sys_rmdir
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* was sys_prof */
+/* 45 */ .long sys_brk
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_ni_syscall /* was sys_signal */
+ .long sys_geteuid16
+/* 50 */ .long sys_getegid16
+ .long sys_acct
+ .long sys_umount
+ .long sys_ni_syscall /* was sys_lock */
+ .long sys_ioctl
+/* 55 */ .long sys_fcntl
+ .long sys_ni_syscall /* was sys_mpx */
+ .long sys_setpgid
+ .long sys_ni_syscall /* was sys_ulimit */
+ .long sys_ni_syscall /* was sys_olduname */
+/* 60 */ .long sys_umask
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+/* 65 */ .long sys_getpgrp
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_ni_syscall /* was sys_sgetmask */
+ .long sys_ni_syscall /* was sys_ssetmask */
+/* 70 */ .long sys_setreuid16
+ .long sys_setregid16
+ .long sys_sigsuspend_wrapper
+ .long sys_sigpending
+ .long sys_sethostname
+/* 75 */ .long sys_setrlimit
+ .long sys_old_getrlimit /* used by libc4 */
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+/* 80 */ .long sys_getgroups16
+ .long sys_setgroups16
+ .long old_select /* used by libc4 */
+ .long sys_symlink
+ .long sys_ni_syscall /* was sys_lstat */
+/* 85 */ .long sys_readlink
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir /* used by libc4 */
+/* 90 */ .long old_mmap /* used by libc4 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+/* 95 */ .long sys_fchown16
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* was sys_profil */
+ .long sys_statfs
+/* 100 */ .long sys_fstatfs
+ .long sys_ni_syscall
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+/* 105 */ .long sys_getitimer
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall /* was sys_uname */
+/* 110 */ .long sys_ni_syscall /* was sys_iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall
+ .long sys_syscall /* call a syscall */
+ .long sys_wait4
+/* 115 */ .long sys_swapoff
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn_wrapper
+/* 120 */ .long sys_clone_wrapper
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall
+ .long sys_adjtimex
+/* 125 */ .long sys_mprotect
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* was sys_create_module */
+ .long sys_init_module
+ .long sys_delete_module
+/* 130 */ .long sys_ni_syscall /* was sys_get_kernel_syms */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+/* 135 */ .long sys_sysfs
+ .long sys_personality
+ .long sys_ni_syscall /* .long _sys_afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+/* 140 */ .long sys_llseek
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+/* 145 */ .long sys_readv
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+/* 150 */ .long sys_mlock
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+/* 155 */ .long sys_sched_getparam
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+/* 160 */ .long sys_sched_get_priority_min
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_arm_mremap
+ .long sys_setresuid16
+/* 165 */ .long sys_getresuid16
+ .long sys_ni_syscall
+ .long sys_ni_syscall /* was sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+/* 170 */ .long sys_setresgid16
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn_wrapper
+ .long sys_rt_sigaction
+/* 175 */ .long sys_rt_sigprocmask
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend_wrapper
+/* 180 */ .long sys_pread64
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+/* 185 */ .long sys_capset
+ .long sys_sigaltstack_wrapper
+ .long sys_sendfile
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+/* 190 */ .long sys_vfork_wrapper
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+/* 195 */ .long sys_stat64
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+/* 200 */ .long sys_getgid
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+/* 205 */ .long sys_getgroups
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+/* 210 */ .long sys_setresgid
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+/* 215 */ .long sys_setfsuid
+ .long sys_setfsgid
+ .long sys_getdents64
+ .long sys_pivot_root
+ .long sys_mincore
+/* 220 */ .long sys_madvise
+ .long sys_fcntl64
+ .long sys_ni_syscall /* TUX */
+ .long sys_ni_syscall
+ .long sys_gettid
+/* 225 */ .long sys_readahead
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+/* 230 */ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+/* 235 */ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+/* 240 */ .long sys_futex_wrapper
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_io_setup
+ .long sys_io_destroy
+/* 245 */ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+/* 250 */ .long sys_epoll_create
+ .long sys_epoll_ctl
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_ni_syscall /* sys_set_thread_area */
+/* 255 */ .long sys_ni_syscall /* sys_get_thread_area */
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime
+ .long sys_timer_gettime
+/* 260 */ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime
+ .long sys_clock_getres
+/* 265 */ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill
+ .long sys_utimes
+/* 270 */ .long sys_fadvise64_64
+ .long sys_pciconfig_iobase
+ .long sys_pciconfig_read
+ .long sys_pciconfig_write
+ .long sys_mq_open
+/* 275 */ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+/* 280 */ .long sys_waitid
+ .long sys_socket
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+/* 285 */ .long sys_accept
+ .long sys_getsockname
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+/* 290 */ .long sys_sendto
+ .long sys_recv
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+/* 295 */ .long sys_getsockopt
+ .long sys_sendmsg
+ .long sys_recvmsg
+ .long sys_semop
+ .long sys_semget
+/* 300 */ .long sys_semctl
+ .long sys_msgsnd
+ .long sys_msgrcv
+ .long sys_msgget
+ .long sys_msgctl
+/* 305 */ .long sys_shmat
+ .long sys_shmdt
+ .long sys_shmget
+ .long sys_shmctl
+ .long sys_add_key
+/* 310 */ .long sys_request_key
+ .long sys_keyctl
+ .long sys_semtimedop
+__syscall_end:
+
+ .rept NR_syscalls - (__syscall_end - __syscall_start) / 4
+ .long sys_ni_syscall
+ .endr
+#endif
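The double include works like this: on the first include NR_syscalls is
undefined, so only the #define is seen and the table is skipped; on the
second include the table is emitted, and the trailing .rept pads it with
sys_ni_syscall so the table is always exactly NR_syscalls entries long. A
sketch of the including side (assumed shape, not verbatim from
entry-common.S):

	#include "calls.S"	/* pass 1: only defines NR_syscalls */
		/* ... */
	ENTRY(sys_call_table)
	#include "calls.S"	/* pass 2: emits the padded .long table */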
diff --git a/arch/arm/kernel/compat.c b/arch/arm/kernel/compat.c
new file mode 100644
index 00000000000..7195add42e7
--- /dev/null
+++ b/arch/arm/kernel/compat.c
@@ -0,0 +1,225 @@
+/*
+ * linux/arch/arm/kernel/compat.c
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * We keep the old params compatibility cruft in one place (here)
+ * so we don't end up with a mess scattered around other places.
+ *
+ * NOTE:
+ * The old struct param_struct is deprecated, but it will be kept in
+ * the kernel for 5 years from now (2001). This will allow boot loaders
+ * to convert to the new struct tag way.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/page.h>
+
+#include <asm/mach/arch.h>
+
+/*
+ * Usage:
+ * - do not go blindly adding fields, add them at the end
+ * - when adding fields, don't rely on the address until
+ * a patch from me has been released
+ * - unused fields should be zero (for future expansion)
+ * - this structure is relatively short-lived - only
+ * guaranteed to contain useful data in setup_arch()
+ *
+ * This is the old deprecated way to pass parameters to the kernel
+ */
+struct param_struct {
+ union {
+ struct {
+ unsigned long page_size; /* 0 */
+ unsigned long nr_pages; /* 4 */
+ unsigned long ramdisk_size; /* 8 */
+ unsigned long flags; /* 12 */
+#define FLAG_READONLY 1
+#define FLAG_RDLOAD 4
+#define FLAG_RDPROMPT 8
+ unsigned long rootdev; /* 16 */
+ unsigned long video_num_cols; /* 20 */
+ unsigned long video_num_rows; /* 24 */
+ unsigned long video_x; /* 28 */
+ unsigned long video_y; /* 32 */
+ unsigned long memc_control_reg; /* 36 */
+ unsigned char sounddefault; /* 40 */
+ unsigned char adfsdrives; /* 41 */
+ unsigned char bytes_per_char_h; /* 42 */
+ unsigned char bytes_per_char_v; /* 43 */
+ unsigned long pages_in_bank[4]; /* 44 */
+ unsigned long pages_in_vram; /* 60 */
+ unsigned long initrd_start; /* 64 */
+ unsigned long initrd_size; /* 68 */
+ unsigned long rd_start; /* 72 */
+ unsigned long system_rev; /* 76 */
+ unsigned long system_serial_low; /* 80 */
+ unsigned long system_serial_high; /* 84 */
+ unsigned long mem_fclk_21285; /* 88 */
+ } s;
+ char unused[256];
+ } u1;
+ union {
+ char paths[8][128];
+ struct {
+ unsigned long magic;
+ char n[1024 - sizeof(unsigned long)];
+ } s;
+ } u2;
+ char commandline[COMMAND_LINE_SIZE];
+};
+
+static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size)
+{
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_MEM;
+ tag->hdr.size = tag_size(tag_mem32);
+ tag->u.mem.size = size;
+ tag->u.mem.start = start;
+
+ return tag;
+}
+
+static void __init build_tag_list(struct param_struct *params, void *taglist)
+{
+ struct tag *tag = taglist;
+
+ if (params->u1.s.page_size != PAGE_SIZE) {
+ printk(KERN_WARNING "Warning: bad configuration page, "
+ "trying to continue\n");
+ return;
+ }
+
+ printk(KERN_DEBUG "Converting old-style param struct to taglist\n");
+
+#ifdef CONFIG_ARCH_NETWINDER
+ if (params->u1.s.nr_pages != 0x02000 &&
+ params->u1.s.nr_pages != 0x04000 &&
+ params->u1.s.nr_pages != 0x08000 &&
+ params->u1.s.nr_pages != 0x10000) {
+ printk(KERN_WARNING "Warning: bad NeTTrom parameters "
+ "detected, using defaults\n");
+
+ params->u1.s.nr_pages = 0x1000; /* 16MB */
+ params->u1.s.ramdisk_size = 0;
+ params->u1.s.flags = FLAG_READONLY;
+ params->u1.s.initrd_start = 0;
+ params->u1.s.initrd_size = 0;
+ params->u1.s.rd_start = 0;
+ }
+#endif
+
+ tag->hdr.tag = ATAG_CORE;
+ tag->hdr.size = tag_size(tag_core);
+ tag->u.core.flags = params->u1.s.flags & FLAG_READONLY;
+ tag->u.core.pagesize = params->u1.s.page_size;
+ tag->u.core.rootdev = params->u1.s.rootdev;
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_RAMDISK;
+ tag->hdr.size = tag_size(tag_ramdisk);
+ tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 1 : 0) |
+ (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0);
+ tag->u.ramdisk.size = params->u1.s.ramdisk_size;
+ tag->u.ramdisk.start = params->u1.s.rd_start;
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_INITRD;
+ tag->hdr.size = tag_size(tag_initrd);
+ tag->u.initrd.start = params->u1.s.initrd_start;
+ tag->u.initrd.size = params->u1.s.initrd_size;
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_SERIAL;
+ tag->hdr.size = tag_size(tag_serialnr);
+ tag->u.serialnr.low = params->u1.s.system_serial_low;
+ tag->u.serialnr.high = params->u1.s.system_serial_high;
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_REVISION;
+ tag->hdr.size = tag_size(tag_revision);
+ tag->u.revision.rev = params->u1.s.system_rev;
+
+#ifdef CONFIG_ARCH_ACORN
+ if (machine_is_riscpc()) {
+ int i;
+ for (i = 0; i < 4; i++)
+ tag = memtag(tag, PHYS_OFFSET + (i << 26),
+ params->u1.s.pages_in_bank[i] * PAGE_SIZE);
+ } else
+#endif
+ tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE);
+
+#ifdef CONFIG_FOOTBRIDGE
+ if (params->u1.s.mem_fclk_21285) {
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_MEMCLK;
+ tag->hdr.size = tag_size(tag_memclk);
+ tag->u.memclk.fmemclk = params->u1.s.mem_fclk_21285;
+ }
+#endif
+
+#ifdef CONFIG_ARCH_EBSA285
+ if (machine_is_ebsa285()) {
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_VIDEOTEXT;
+ tag->hdr.size = tag_size(tag_videotext);
+ tag->u.videotext.x = params->u1.s.video_x;
+ tag->u.videotext.y = params->u1.s.video_y;
+ tag->u.videotext.video_page = 0;
+ tag->u.videotext.video_mode = 0;
+ tag->u.videotext.video_cols = params->u1.s.video_num_cols;
+ tag->u.videotext.video_ega_bx = 0;
+ tag->u.videotext.video_lines = params->u1.s.video_num_rows;
+ tag->u.videotext.video_isvga = 1;
+ tag->u.videotext.video_points = 8;
+ }
+#endif
+
+#ifdef CONFIG_ARCH_ACORN
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_ACORN;
+ tag->hdr.size = tag_size(tag_acorn);
+ tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg;
+ tag->u.acorn.vram_pages = params->u1.s.pages_in_vram;
+ tag->u.acorn.sounddefault = params->u1.s.sounddefault;
+ tag->u.acorn.adfsdrives = params->u1.s.adfsdrives;
+#endif
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_CMDLINE;
+ tag->hdr.size = (strlen(params->commandline) + 3 +
+ sizeof(struct tag_header)) >> 2;
+ strcpy(tag->u.cmdline.cmdline, params->commandline);
+
+ tag = tag_next(tag);
+ tag->hdr.tag = ATAG_NONE;
+ tag->hdr.size = 0;
+
+ memmove(params, taglist, ((int)tag) - ((int)taglist) +
+ sizeof(struct tag_header));
+}
+
+void __init convert_to_tag_list(struct tag *tags)
+{
+ struct param_struct *params = (struct param_struct *)tags;
+ build_tag_list(params, &params->u2);
+}
+
+void __init squash_mem_tags(struct tag *tag)
+{
+ for (; tag->hdr.size; tag = tag_next(tag))
+ if (tag->hdr.tag == ATAG_MEM)
+ tag->hdr.tag = ATAG_NONE;
+}
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
new file mode 100644
index 00000000000..caaa919ab47
--- /dev/null
+++ b/arch/arm/kernel/debug.S
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/kernel/debug.S
+ *
+ * Copyright (C) 1994-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 32-bit debugging code
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+
+ .text
+
+/*
+ * Some debugging routines (useful if you've got MM problems and
+ * printk isn't working). For DEBUGGING ONLY!!! Do not leave
+ * references to these in a production kernel!
+ */
+
+#if defined(CONFIG_DEBUG_ICEDCC)
+ @@ debug using ARM EmbeddedICE DCC channel
+ .macro addruart, rx
+ .endm
+
+ .macro senduart, rd, rx
+ mcr p14, 0, \rd, c1, c0, 0
+ .endm
+
+ .macro busyuart, rd, rx
+1001:
+ mrc p14, 0, \rx, c0, c0, 0
+ tst \rx, #2
+ beq 1001b
+
+ .endm
+
+ .macro waituart, rd, rx
+ mov \rd, #0x2000000
+1001:
+ subs \rd, \rd, #1
+ bmi 1002f
+ mrc p14, 0, \rx, c0, c0, 0
+ tst \rx, #2
+ bne 1001b
+1002:
+ .endm
+#else
+#include <asm/arch/debug-macro.S>
+#endif
+
+/*
+ * Useful debugging routines
+ */
+ENTRY(printhex8)
+ mov r1, #8
+ b printhex
+
+ENTRY(printhex4)
+ mov r1, #4
+ b printhex
+
+ENTRY(printhex2)
+ mov r1, #2
+printhex: adr r2, hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ strb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0, lsr #4
+ cmp r1, #10
+ addlt r1, r1, #'0'
+ addge r1, r1, #'a' - 10
+ strb r1, [r3, #-1]!
+ teq r3, r2
+ bne 1b
+ mov r0, r2
+ b printascii
+
+ .ltorg
+
+ENTRY(printascii)
+ addruart r3
+ b 2f
+1: waituart r2, r3
+ senduart r1, r3
+ busyuart r2, r3
+ teq r1, #'\n'
+ moveq r1, #'\r'
+ beq 1b
+2: teq r0, #0
+ ldrneb r1, [r0], #1
+ teqne r1, #0
+ bne 1b
+ mov pc, lr
+
+ENTRY(printch)
+ addruart r3
+ mov r1, r0
+ mov r0, #0
+	b	1b			@ reuse printascii's output loop above
+
+hexbuf: .space 16
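printhex builds its string in reverse: it plants a NUL at the end of hexbuf, then repeatedly peels the low nibble off r0 and stores digits walking backwards until the start of the buffer is reached. The same loop rendered in C, purely for reference (names are illustrative):

	#include <stdio.h>

	static void printhexn(unsigned long val, int digits)
	{
		char buf[16 + 1];
		char *p = buf + digits;

		*p = '\0';			/* terminator first, like strb r1, [r3] */
		while (p != buf) {
			unsigned int nibble = val & 15;

			val >>= 4;
			*--p = nibble < 10 ? '0' + nibble : 'a' + nibble - 10;
		}
		fputs(buf, stdout);
	}

	int main(void)
	{
		printhexn(0xdeadbeef, 8);	/* prints "deadbeef" */
		putchar('\n');
		return 0;
	}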
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
new file mode 100644
index 00000000000..e9a36304ec3
--- /dev/null
+++ b/arch/arm/kernel/dma-isa.c
@@ -0,0 +1,207 @@
+/*
+ * linux/arch/arm/kernel/dma-isa.c
+ *
+ * Copyright (C) 1999-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ISA DMA primitives
+ * Taken from various sources, including:
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen and John Boyd,
+ * Nov. 1992.
+ * arch/arm/kernel/dma-ebsa285.c
+ * Copyright (C) 1998 Phil Blundell
+ */
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <asm/mach/dma.h>
+
+#define ISA_DMA_MODE_READ 0x44
+#define ISA_DMA_MODE_WRITE 0x48
+#define ISA_DMA_MODE_CASCADE 0xc0
+#define ISA_DMA_AUTOINIT 0x10
+
+#define ISA_DMA_MASK 0
+#define ISA_DMA_MODE 1
+#define ISA_DMA_CLRFF 2
+#define ISA_DMA_PGHI 3
+#define ISA_DMA_PGLO 4
+#define ISA_DMA_ADDR 5
+#define ISA_DMA_COUNT 6
+
+static unsigned int isa_dma_port[8][7] = {
+ /* MASK MODE CLRFF PAGE_HI PAGE_LO ADDR COUNT */
+ { 0x0a, 0x0b, 0x0c, 0x487, 0x087, 0x00, 0x01 },
+ { 0x0a, 0x0b, 0x0c, 0x483, 0x083, 0x02, 0x03 },
+ { 0x0a, 0x0b, 0x0c, 0x481, 0x081, 0x04, 0x05 },
+ { 0x0a, 0x0b, 0x0c, 0x482, 0x082, 0x06, 0x07 },
+ { 0xd4, 0xd6, 0xd8, 0x000, 0x000, 0xc0, 0xc2 },
+ { 0xd4, 0xd6, 0xd8, 0x48b, 0x08b, 0xc4, 0xc6 },
+ { 0xd4, 0xd6, 0xd8, 0x489, 0x089, 0xc8, 0xca },
+ { 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
+};
+
+static int isa_get_dma_residue(dmach_t channel, dma_t *dma)
+{
+ unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT];
+ int count;
+
+ count = 1 + inb(io_port);
+ count |= inb(io_port) << 8;
+
+ return channel < 4 ? count : (count << 1);
+}
+
+static void isa_enable_dma(dmach_t channel, dma_t *dma)
+{
+ if (dma->invalid) {
+ unsigned long address, length;
+ unsigned int mode, direction;
+
+ mode = channel & 3;
+ switch (dma->dma_mode & DMA_MODE_MASK) {
+ case DMA_MODE_READ:
+ mode |= ISA_DMA_MODE_READ;
+ direction = PCI_DMA_FROMDEVICE;
+ break;
+
+ case DMA_MODE_WRITE:
+ mode |= ISA_DMA_MODE_WRITE;
+ direction = PCI_DMA_TODEVICE;
+ break;
+
+ case DMA_MODE_CASCADE:
+ mode |= ISA_DMA_MODE_CASCADE;
+ direction = PCI_DMA_BIDIRECTIONAL;
+ break;
+
+ default:
+ direction = PCI_DMA_NONE;
+ break;
+ }
+
+ if (!dma->using_sg) {
+ /*
+ * Cope with ISA-style drivers which expect cache
+ * coherence.
+ */
+ dma->buf.dma_address = pci_map_single(NULL,
+ dma->buf.__address, dma->buf.length,
+ direction);
+ }
+
+ address = dma->buf.dma_address;
+ length = dma->buf.length - 1;
+
+ outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
+ outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);
+
+ if (channel >= 4) {
+ address >>= 1;
+ length >>= 1;
+ }
+
+ outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);
+
+ outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
+ outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);
+
+ outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
+ outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);
+
+ if (dma->dma_mode & DMA_AUTOINIT)
+ mode |= ISA_DMA_AUTOINIT;
+
+ outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
+ dma->invalid = 0;
+ }
+ outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
+}
+
+static void isa_disable_dma(dmach_t channel, dma_t *dma)
+{
+ outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]);
+}
+
+static struct dma_ops isa_dma_ops = {
+ .type = "ISA",
+ .enable = isa_enable_dma,
+ .disable = isa_disable_dma,
+ .residue = isa_get_dma_residue,
+};
+
+static struct resource dma_resources[] = {
+ { "dma1", 0x0000, 0x000f },
+ { "dma low page", 0x0080, 0x008f },
+ { "dma2", 0x00c0, 0x00df },
+ { "dma high page", 0x0480, 0x048f }
+};
+
+void __init isa_init_dma(dma_t *dma)
+{
+ /*
+ * Try to autodetect presence of an ISA DMA controller.
+ * We do some minimal initialisation, and check that
+ * channel 0's DMA address registers are writeable.
+ */
+ outb(0xff, 0x0d);
+ outb(0xff, 0xda);
+
+ /*
+ * Write high and low address, and then read them back
+ * in the same order.
+ */
+ outb(0x55, 0x00);
+ outb(0xaa, 0x00);
+
+ if (inb(0) == 0x55 && inb(0) == 0xaa) {
+ int channel, i;
+
+ for (channel = 0; channel < 8; channel++) {
+ dma[channel].d_ops = &isa_dma_ops;
+ isa_disable_dma(channel, NULL);
+ }
+
+ outb(0x40, 0x0b);
+ outb(0x41, 0x0b);
+ outb(0x42, 0x0b);
+ outb(0x43, 0x0b);
+
+ outb(0xc0, 0xd6);
+ outb(0x41, 0xd6);
+ outb(0x42, 0xd6);
+ outb(0x43, 0xd6);
+
+ outb(0, 0xd4);
+
+ outb(0x10, 0x08);
+ outb(0x10, 0xd0);
+
+ /*
+ * Is this correct? According to my documentation, it
+ * doesn't appear to be. It should be:
+ * outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
+ */
+ outb(0x30, 0x40b);
+ outb(0x31, 0x40b);
+ outb(0x32, 0x40b);
+ outb(0x33, 0x40b);
+ outb(0x31, 0x4d6);
+ outb(0x32, 0x4d6);
+ outb(0x33, 0x4d6);
+
+ request_dma(DMA_ISA_CASCADE, "cascade");
+
+ for (i = 0; i < sizeof(dma_resources) / sizeof(dma_resources[0]); i++)
+ request_resource(&ioport_resource, dma_resources + i);
+ }
+}
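Note the scaling in isa_enable_dma(): channels 4-7 live on the second 8237, which transfers 16-bit words, so the byte address and length are halved before being written to the chip, while the page register always takes byte address bits 16-23. This is also why isa_get_dma_residue() doubles the count for those channels. A small stand-alone illustration of the values that get programmed:

	#include <stdio.h>

	int main(void)
	{
		unsigned long address = 0x123456, length = 0x1000 - 1;
		int channel = 5;

		/* page register always takes byte address bits 16-23 */
		printf("page  = %02lx\n", (address >> 16) & 0xff);

		if (channel >= 4) {	/* word channel: halve both */
			address >>= 1;
			length >>= 1;
		}
		printf("addr  = %04lx\n", address & 0xffff);
		printf("count = %04lx\n", length & 0xffff);
		return 0;
	}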
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
new file mode 100644
index 00000000000..2b788388423
--- /dev/null
+++ b/arch/arm/kernel/dma.c
@@ -0,0 +1,302 @@
+/*
+ * linux/arch/arm/kernel/dma.c
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Front-end to the DMA handling. This handles the allocation/freeing
+ * of DMA channels, and provides a unified interface to the machine's
+ * DMA facilities.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/dma.h>
+
+#include <asm/mach/dma.h>
+
+DEFINE_SPINLOCK(dma_spin_lock);
+
+#if MAX_DMA_CHANNELS > 0
+
+static dma_t dma_chan[MAX_DMA_CHANNELS];
+
+/*
+ * Get dma list for /proc/dma
+ */
+int get_dma_list(char *buf)
+{
+ dma_t *dma;
+ char *p = buf;
+ int i;
+
+ for (i = 0, dma = dma_chan; i < MAX_DMA_CHANNELS; i++, dma++)
+ if (dma->lock)
+ p += sprintf(p, "%2d: %14s %s\n", i,
+ dma->d_ops->type, dma->device_id);
+
+ return p - buf;
+}
+
+/*
+ * Request DMA channel
+ *
+ * On certain platforms, we have to allocate an interrupt as well...
+ */
+int request_dma(dmach_t channel, const char *device_id)
+{
+ dma_t *dma = dma_chan + channel;
+ int ret;
+
+ if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+ goto bad_dma;
+
+ if (xchg(&dma->lock, 1) != 0)
+ goto busy;
+
+ dma->device_id = device_id;
+ dma->active = 0;
+ dma->invalid = 1;
+
+ ret = 0;
+ if (dma->d_ops->request)
+ ret = dma->d_ops->request(channel, dma);
+
+ if (ret)
+ xchg(&dma->lock, 0);
+
+ return ret;
+
+bad_dma:
+ printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
+ return -EINVAL;
+
+busy:
+ return -EBUSY;
+}
+
+/*
+ * Free DMA channel
+ *
+ * On certain platforms, we have to free interrupt as well...
+ */
+void free_dma(dmach_t channel)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
+ goto bad_dma;
+
+ if (dma->active) {
+ printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
+ dma->d_ops->disable(channel, dma);
+ dma->active = 0;
+ }
+
+ if (xchg(&dma->lock, 0) != 0) {
+ if (dma->d_ops->free)
+ dma->d_ops->free(channel, dma);
+ return;
+ }
+
+ printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
+ return;
+
+bad_dma:
+ printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
+}
+
+/* Set DMA Scatter-Gather list
+ */
+void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (dma->active)
+ printk(KERN_ERR "dma%d: altering DMA SG while "
+ "DMA active\n", channel);
+
+ dma->sg = sg;
+ dma->sgcount = nr_sg;
+ dma->using_sg = 1;
+ dma->invalid = 1;
+}
+
+/* Set DMA address
+ *
+ * Copy address to the structure, and set the invalid bit
+ */
+void set_dma_addr (dmach_t channel, unsigned long physaddr)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (dma->active)
+ printk(KERN_ERR "dma%d: altering DMA address while "
+ "DMA active\n", channel);
+
+ dma->sg = &dma->buf;
+ dma->sgcount = 1;
+ dma->buf.__address = bus_to_virt(physaddr);
+ dma->using_sg = 0;
+ dma->invalid = 1;
+}
+
+/* Set DMA byte count
+ *
+ * Copy count to the structure, and set the invalid bit
+ */
+void set_dma_count (dmach_t channel, unsigned long count)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (dma->active)
+ printk(KERN_ERR "dma%d: altering DMA count while "
+ "DMA active\n", channel);
+
+ dma->sg = &dma->buf;
+ dma->sgcount = 1;
+ dma->buf.length = count;
+ dma->using_sg = 0;
+ dma->invalid = 1;
+}
+
+/* Set DMA direction mode
+ */
+void set_dma_mode (dmach_t channel, dmamode_t mode)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (dma->active)
+ printk(KERN_ERR "dma%d: altering DMA mode while "
+ "DMA active\n", channel);
+
+ dma->dma_mode = mode;
+ dma->invalid = 1;
+}
+
+/* Enable DMA channel
+ */
+void enable_dma (dmach_t channel)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (!dma->lock)
+ goto free_dma;
+
+ if (dma->active == 0) {
+ dma->active = 1;
+ dma->d_ops->enable(channel, dma);
+ }
+ return;
+
+free_dma:
+ printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
+ BUG();
+}
+
+/* Disable DMA channel
+ */
+void disable_dma (dmach_t channel)
+{
+ dma_t *dma = dma_chan + channel;
+
+ if (!dma->lock)
+ goto free_dma;
+
+ if (dma->active == 1) {
+ dma->active = 0;
+ dma->d_ops->disable(channel, dma);
+ }
+ return;
+
+free_dma:
+ printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
+ BUG();
+}
+
+/*
+ * Is the specified DMA channel active?
+ */
+int dma_channel_active(dmach_t channel)
+{
+ return dma_chan[channel].active;
+}
+
+void set_dma_page(dmach_t channel, char pagenr)
+{
+ printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
+}
+
+void set_dma_speed(dmach_t channel, int cycle_ns)
+{
+ dma_t *dma = dma_chan + channel;
+ int ret = 0;
+
+ if (dma->d_ops->setspeed)
+ ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
+ dma->speed = ret;
+}
+
+int get_dma_residue(dmach_t channel)
+{
+ dma_t *dma = dma_chan + channel;
+ int ret = 0;
+
+ if (dma->d_ops->residue)
+ ret = dma->d_ops->residue(channel, dma);
+
+ return ret;
+}
+
+void __init init_dma(void)
+{
+ arch_dma_init(dma_chan);
+}
+
+#else
+
+int request_dma(dmach_t channel, const char *device_id)
+{
+ return -EINVAL;
+}
+
+int get_dma_residue(dmach_t channel)
+{
+ return 0;
+}
+
+#define GLOBAL_ALIAS(_a,_b) asm (".set " #_a "," #_b "; .globl " #_a)
+GLOBAL_ALIAS(disable_dma, get_dma_residue);
+GLOBAL_ALIAS(enable_dma, get_dma_residue);
+GLOBAL_ALIAS(free_dma, get_dma_residue);
+GLOBAL_ALIAS(get_dma_list, get_dma_residue);
+GLOBAL_ALIAS(set_dma_mode, get_dma_residue);
+GLOBAL_ALIAS(set_dma_page, get_dma_residue);
+GLOBAL_ALIAS(set_dma_count, get_dma_residue);
+GLOBAL_ALIAS(set_dma_addr, get_dma_residue);
+GLOBAL_ALIAS(set_dma_sg, get_dma_residue);
+GLOBAL_ALIAS(set_dma_speed, get_dma_residue);
+GLOBAL_ALIAS(init_dma, get_dma_residue);
+
+#endif
+
+EXPORT_SYMBOL(request_dma);
+EXPORT_SYMBOL(free_dma);
+EXPORT_SYMBOL(enable_dma);
+EXPORT_SYMBOL(disable_dma);
+EXPORT_SYMBOL(set_dma_addr);
+EXPORT_SYMBOL(set_dma_count);
+EXPORT_SYMBOL(set_dma_mode);
+EXPORT_SYMBOL(set_dma_page);
+EXPORT_SYMBOL(get_dma_residue);
+EXPORT_SYMBOL(set_dma_sg);
+EXPORT_SYMBOL(set_dma_speed);
+
+EXPORT_SYMBOL(dma_spin_lock);
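A hedged sketch of how a driver consumes this front-end, in the claim/describe/enable/tear-down order that the warnings in the setters above imply; MY_DMA_CHANNEL, buf_phys and buf_len are hypothetical placeholders and this is a fragment, not a complete driver:

	/* MY_DMA_CHANNEL, buf_phys and buf_len are made up for this sketch */
	if (request_dma(MY_DMA_CHANNEL, "mydev") == 0) {
		set_dma_mode(MY_DMA_CHANNEL, DMA_MODE_READ);
		set_dma_addr(MY_DMA_CHANNEL, buf_phys);	/* bus address */
		set_dma_count(MY_DMA_CHANNEL, buf_len);
		enable_dma(MY_DMA_CHANNEL);

		/* ... wait for the device to signal completion ... */

		disable_dma(MY_DMA_CHANNEL);
		free_dma(MY_DMA_CHANNEL);
	}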
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
new file mode 100644
index 00000000000..3dc15b131f5
--- /dev/null
+++ b/arch/arm/kernel/ecard.c
@@ -0,0 +1,1210 @@
+/*
+ * linux/arch/arm/kernel/ecard.c
+ *
+ * Copyright 1995-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Find all installed expansion cards, and handle interrupts from them.
+ *
+ * Created from information from Acorn's RISC OS 3 PRMs
+ *
+ * 08-Dec-1996	RMK	Added code for the 9th expansion card - the ether
+ * podule slot.
+ * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
+ * 12-Sep-1997 RMK Created new handling of interrupt enables/disables
+ * - cards can now register their own routine to control
+ * interrupts (recommended).
+ * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled
+ * on reset from Linux. (Caused cards not to respond
+ * under RiscOS without hard reset).
+ * 15-Feb-1998 RMK Added DMA support
+ * 12-Sep-1998 RMK Added EASI support
+ * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment.
+ * 17-Apr-1999 RMK Support for EASI Type C cycles.
+ */
+#define ECARD_C
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/reboot.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+
+#include <asm/dma.h>
+#include <asm/ecard.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/mach/irq.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_ARCH_RPC
+#define HAS_EXPMASK
+#endif
+
+struct ecard_request {
+ void (*fn)(struct ecard_request *);
+ ecard_t *ec;
+ unsigned int address;
+ unsigned int length;
+ unsigned int use_loader;
+ void *buffer;
+ struct completion *complete;
+};
+
+struct expcard_blacklist {
+ unsigned short manufacturer;
+ unsigned short product;
+ const char *type;
+};
+
+static ecard_t *cards;
+static ecard_t *slot_to_expcard[MAX_ECARDS];
+static unsigned int ectcr;
+#ifdef HAS_EXPMASK
+static unsigned int have_expmask;
+#endif
+
+/* List of descriptions of cards which don't have an extended
+ * identification, or chunk directories containing a description.
+ */
+static struct expcard_blacklist __initdata blacklist[] = {
+ { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
+};
+
+asmlinkage extern int
+ecard_loader_reset(unsigned long base, loader_t loader);
+asmlinkage extern int
+ecard_loader_read(int off, unsigned long base, loader_t loader);
+
+static inline unsigned short ecard_getu16(unsigned char *v)
+{
+ return v[0] | v[1] << 8;
+}
+
+static inline signed long ecard_gets24(unsigned char *v)
+{
+ return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0);
+}
+
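ecard_gets24() assembles a little-endian 24-bit field and manually sign-extends it from bit 23. A quick stand-alone check of the idiom:

	#include <stdio.h>
	#include <stdint.h>

	static int32_t gets24(const unsigned char *v)
	{
		return v[0] | v[1] << 8 | v[2] << 16 |
		       ((v[2] & 0x80) ? 0xff000000 : 0);
	}

	int main(void)
	{
		unsigned char neg[3] = { 0x00, 0x00, 0x80 };	/* -8388608 */
		unsigned char pos[3] = { 0xff, 0xff, 0x7f };	/*  8388607 */

		printf("%d %d\n", gets24(neg), gets24(pos));
		return 0;
	}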
+static inline ecard_t *slot_to_ecard(unsigned int slot)
+{
+ return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
+}
+
+/* ===================== Expansion card daemon ======================== */
+/*
+ * Since the loader programs on the expansion cards need to be run
+ * in a specific environment, create a separate task with this
+ * environment set up, and pass requests to this task as and when we
+ * need to.
+ *
+ * This should allow 99% of loaders to be called from Linux.
+ *
+ * From a security standpoint, we trust the card vendors. This
+ * may be a misplaced trust.
+ */
+static void ecard_task_reset(struct ecard_request *req)
+{
+ struct expansion_card *ec = req->ec;
+ struct resource *res;
+
+ res = ec->slot_no == 8
+ ? &ec->resource[ECARD_RES_MEMC]
+ : ec->type == ECARD_EASI
+ ? &ec->resource[ECARD_RES_EASI]
+ : &ec->resource[ECARD_RES_IOCSYNC];
+
+ ecard_loader_reset(res->start, ec->loader);
+}
+
+static void ecard_task_readbytes(struct ecard_request *req)
+{
+ struct expansion_card *ec = req->ec;
+ unsigned char *buf = req->buffer;
+ unsigned int len = req->length;
+ unsigned int off = req->address;
+
+ if (ec->slot_no == 8) {
+ void __iomem *base = (void __iomem *)
+ ec->resource[ECARD_RES_MEMC].start;
+
+ /*
+ * The card maintains an index which increments the address
+ * into a 4096-byte page on each access. We need to keep
+ * track of the counter.
+ */
+ static unsigned int index;
+ unsigned int page;
+
+ page = (off >> 12) * 4;
+ if (page > 256 * 4)
+ return;
+
+ off &= 4095;
+
+ /*
+ * If we are reading offset 0, or our current index is
+ * greater than the offset, reset the hardware index counter.
+ */
+ if (off == 0 || index > off) {
+ writeb(0, base);
+ index = 0;
+ }
+
+ /*
+ * Increment the hardware index counter until we get to the
+ * required offset. The read bytes are discarded.
+ */
+ while (index < off) {
+ readb(base + page);
+ index += 1;
+ }
+
+ while (len--) {
+ *buf++ = readb(base + page);
+ index += 1;
+ }
+ } else {
+ unsigned long base = (ec->type == ECARD_EASI
+ ? &ec->resource[ECARD_RES_EASI]
+ : &ec->resource[ECARD_RES_IOCSYNC])->start;
+ void __iomem *pbase = (void __iomem *)base;
+
+ if (!req->use_loader || !ec->loader) {
+ off *= 4;
+ while (len--) {
+ *buf++ = readb(pbase + off);
+ off += 4;
+ }
+ } else {
+			while (len--) {
+ /*
+ * The following is required by some
+ * expansion card loader programs.
+ */
+ *(unsigned long *)0x108 = 0;
+ *buf++ = ecard_loader_read(off++, base,
+ ec->loader);
+ }
+ }
+ }
+
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
+static struct ecard_request *ecard_req;
+static DECLARE_MUTEX(ecard_sem);
+
+/*
+ * Set up the expansion card daemon's page tables.
+ */
+static void ecard_init_pgtables(struct mm_struct *mm)
+{
+ struct vm_area_struct vma;
+
+ /* We want to set up the page tables for the following mapping:
+ * Virtual Physical
+ * 0x03000000 0x03000000
+ * 0x03010000 unmapped
+ * 0x03210000 0x03210000
+ * 0x03400000 unmapped
+ * 0x08000000 0x08000000
+ * 0x10000000 unmapped
+ *
+ * FIXME: we don't follow this 100% yet.
+ */
+ pgd_t *src_pgd, *dst_pgd;
+
+ src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
+ dst_pgd = pgd_offset(mm, IO_START);
+
+ memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));
+
+ src_pgd = pgd_offset(mm, EASI_BASE);
+ dst_pgd = pgd_offset(mm, EASI_START);
+
+ memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+
+ vma.vm_mm = mm;
+
+ flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
+ flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
+}
+
+static int ecard_init_mm(void)
+{
+ struct mm_struct * mm = mm_alloc();
+ struct mm_struct *active_mm = current->active_mm;
+
+ if (!mm)
+ return -ENOMEM;
+
+ current->mm = mm;
+ current->active_mm = mm;
+ activate_mm(active_mm, mm);
+ mmdrop(active_mm);
+ ecard_init_pgtables(mm);
+ return 0;
+}
+
+static int
+ecard_task(void * unused)
+{
+ daemonize("kecardd");
+
+ /*
+ * Allocate a mm. We're not a lazy-TLB kernel task since we need
+ * to set page table entries where the user space would be. Note
+ * that this also creates the page tables. Failure is not an
+ * option here.
+ */
+ if (ecard_init_mm())
+ panic("kecardd: unable to alloc mm\n");
+
+ while (1) {
+ struct ecard_request *req;
+
+ wait_event_interruptible(ecard_wait, ecard_req != NULL);
+
+ req = xchg(&ecard_req, NULL);
+ if (req != NULL) {
+ req->fn(req);
+ complete(req->complete);
+ }
+ }
+}
+
+/*
+ * Wake the expansion card daemon to action our request.
+ *
+ * FIXME: The test here is not sufficient to detect if the
+ * kecardd is running.
+ */
+static void ecard_call(struct ecard_request *req)
+{
+ DECLARE_COMPLETION(completion);
+
+ req->complete = &completion;
+
+ down(&ecard_sem);
+ ecard_req = req;
+ wake_up(&ecard_wait);
+
+ /*
+ * Now wait for kecardd to run.
+ */
+ wait_for_completion(&completion);
+ up(&ecard_sem);
+}
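ecard_call() and ecard_task() form a single-slot rendezvous: one caller at a time publishes a request, wakes the daemon, and sleeps on an on-stack completion until the work has been run. A rough user-space analogue using POSIX threads, illustrative only; for brevity the sketch runs fn() under the mutex, which the real code does not:

	#include <pthread.h>
	#include <stddef.h>

	struct request {
		void (*fn)(void);
		int done;		/* per-request, like the on-stack completion */
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
	static struct request *pending;		/* single slot, like ecard_req */

	static void submit(struct request *req)		/* like ecard_call() */
	{
		pthread_mutex_lock(&lock);
		while (pending)			/* serialise callers (ecard_sem) */
			pthread_cond_wait(&cond, &lock);
		pending = req;
		pthread_cond_broadcast(&cond);	/* wake_up(&ecard_wait) */
		while (!req->done)		/* wait_for_completion() */
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	static void *daemon_loop(void *unused)		/* like ecard_task() */
	{
		pthread_mutex_lock(&lock);
		for (;;) {
			struct request *req;

			while (!pending)
				pthread_cond_wait(&cond, &lock);
			req = pending;
			pending = NULL;
			req->fn();
			req->done = 1;
			pthread_cond_broadcast(&cond);	/* complete(), next caller */
		}
	}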
+
+/* ======================= Mid-level card control ===================== */
+
+static void
+ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
+{
+ struct ecard_request req;
+
+ req.fn = ecard_task_readbytes;
+ req.ec = ec;
+ req.address = off;
+ req.length = len;
+ req.use_loader = useld;
+ req.buffer = addr;
+
+ ecard_call(&req);
+}
+
+int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
+{
+ struct ex_chunk_dir excd;
+ int index = 16;
+ int useld = 0;
+
+ if (!ec->cid.cd)
+ return 0;
+
+	while (1) {
+ ecard_readbytes(&excd, ec, index, 8, useld);
+ index += 8;
+ if (c_id(&excd) == 0) {
+ if (!useld && ec->loader) {
+ useld = 1;
+ index = 0;
+ continue;
+ }
+ return 0;
+ }
+ if (c_id(&excd) == 0xf0) { /* link */
+ index = c_start(&excd);
+ continue;
+ }
+ if (c_id(&excd) == 0x80) { /* loader */
+ if (!ec->loader) {
+ ec->loader = (loader_t)kmalloc(c_len(&excd),
+ GFP_KERNEL);
+ if (ec->loader)
+ ecard_readbytes(ec->loader, ec,
+ (int)c_start(&excd),
+ c_len(&excd), useld);
+ else
+ return 0;
+ }
+ continue;
+ }
+ if (c_id(&excd) == id && num-- == 0)
+ break;
+ }
+
+ if (c_id(&excd) & 0x80) {
+ switch (c_id(&excd) & 0x70) {
+ case 0x70:
+ ecard_readbytes((unsigned char *)excd.d.string, ec,
+ (int)c_start(&excd), c_len(&excd),
+ useld);
+ break;
+ case 0x00:
+ break;
+ }
+ }
+ cd->start_offset = c_start(&excd);
+ memcpy(cd->d.string, excd.d.string, 256);
+ return 1;
+}
+
+/* ======================= Interrupt control ============================ */
+
+static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask |= 1 << irqnr;
+ __raw_writeb(have_expmask, EXPMASK_ENABLE);
+ }
+#endif
+}
+
+static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask &= ~(1 << irqnr);
+ __raw_writeb(have_expmask, EXPMASK_ENABLE);
+ }
+#endif
+}
+
+static int ecard_def_irq_pending(ecard_t *ec)
+{
+ return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask;
+}
+
+static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
+{
+ panic("ecard_def_fiq_enable called - impossible");
+}
+
+static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
+{
+ panic("ecard_def_fiq_disable called - impossible");
+}
+
+static int ecard_def_fiq_pending(ecard_t *ec)
+{
+ return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask;
+}
+
+static expansioncard_ops_t ecard_default_ops = {
+ ecard_def_irq_enable,
+ ecard_def_irq_disable,
+ ecard_def_irq_pending,
+ ecard_def_fiq_enable,
+ ecard_def_fiq_disable,
+ ecard_def_fiq_pending
+};
+
+/*
+ * Enable and disable interrupts from expansion cards.
+ * (interrupts are disabled for these functions).
+ *
+ * They are not meant to be called directly, but via enable/disable_irq.
+ */
+static void ecard_irq_unmask(unsigned int irqnr)
+{
+ ecard_t *ec = slot_to_ecard(irqnr - 32);
+
+ if (ec) {
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->irqenable)
+ ec->ops->irqenable(ec, irqnr);
+ else
+ printk(KERN_ERR "ecard: rejecting request to "
+ "enable IRQs for %d\n", irqnr);
+ }
+}
+
+static void ecard_irq_mask(unsigned int irqnr)
+{
+ ecard_t *ec = slot_to_ecard(irqnr - 32);
+
+ if (ec) {
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops && ec->ops->irqdisable)
+ ec->ops->irqdisable(ec, irqnr);
+ }
+}
+
+static struct irqchip ecard_chip = {
+ .ack = ecard_irq_mask,
+ .mask = ecard_irq_mask,
+ .unmask = ecard_irq_unmask,
+};
+
+void ecard_enablefiq(unsigned int fiqnr)
+{
+ ecard_t *ec = slot_to_ecard(fiqnr);
+
+ if (ec) {
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->fiqenable)
+ ec->ops->fiqenable(ec, fiqnr);
+ else
+ printk(KERN_ERR "ecard: rejecting request to "
+ "enable FIQs for %d\n", fiqnr);
+ }
+}
+
+void ecard_disablefiq(unsigned int fiqnr)
+{
+ ecard_t *ec = slot_to_ecard(fiqnr);
+
+ if (ec) {
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops->fiqdisable)
+ ec->ops->fiqdisable(ec, fiqnr);
+ }
+}
+
+static void ecard_dump_irq_state(void)
+{
+ ecard_t *ec;
+
+ printk("Expansion card IRQ state:\n");
+
+ for (ec = cards; ec; ec = ec->next) {
+ if (ec->slot_no == 8)
+ continue;
+
+ printk(" %d: %sclaimed, ",
+ ec->slot_no, ec->claimed ? "" : "not ");
+
+ if (ec->ops && ec->ops->irqpending &&
+ ec->ops != &ecard_default_ops)
+ printk("irq %spending\n",
+ ec->ops->irqpending(ec) ? "" : "not ");
+ else
+ printk("irqaddr %p, mask = %02X, status = %02X\n",
+ ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
+ }
+}
+
+static void ecard_check_lockup(struct irqdesc *desc)
+{
+ static unsigned long last;
+ static int lockup;
+
+ /*
+ * If the timer interrupt has not run since the last million
+ * unrecognised expansion card interrupts, then there is
+ * something seriously wrong. Disable the expansion card
+ * interrupts so at least we can continue.
+ *
+ * Maybe we ought to start a timer to re-enable them some time
+ * later?
+ */
+ if (last == jiffies) {
+ lockup += 1;
+ if (lockup > 1000000) {
+ printk(KERN_ERR "\nInterrupt lockup detected - "
+ "disabling all expansion card interrupts\n");
+
+ desc->chip->mask(IRQ_EXPANSIONCARD);
+ ecard_dump_irq_state();
+ }
+ } else
+ lockup = 0;
+
+ /*
+ * If we did not recognise the source of this interrupt,
+ * warn the user, but don't flood the user with these messages.
+ */
+ if (!last || time_after(jiffies, last + 5*HZ)) {
+ last = jiffies;
+ printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
+ ecard_dump_irq_state();
+ }
+}
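Two independent heuristics are at work above: a lockup counter that only accumulates while jiffies stands still (no timer tick has landed between unrecognised interrupts), and a once-per-five-seconds rate limit on the warning. The same pair in isolation, with "now" standing in for jiffies and all values illustrative:

	#include <stdio.h>

	#define INTERVAL	5		/* seconds, like 5*HZ above */
	#define LOCKUP_LIMIT	1000000

	static void unrecognised_irq(unsigned long now)
	{
		static unsigned long last;
		static long lockup;

		if (now == last) {		/* time stood still: count it */
			if (++lockup > LOCKUP_LIMIT)
				printf("lockup: mask the source\n");
		} else
			lockup = 0;

		if (!last || now - last >= INTERVAL) {	/* rate-limited warning */
			last = now;
			printf("unrecognised interrupt\n");
		}
	}

	int main(void)
	{
		unrecognised_irq(1);	/* warns */
		unrecognised_irq(1);	/* same "jiffy": counts towards lockup */
		unrecognised_irq(7);	/* 6s later: warns again, counter resets */
		return 0;
	}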
+
+static void
+ecard_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ ecard_t *ec;
+ int called = 0;
+
+ desc->chip->mask(irq);
+ for (ec = cards; ec; ec = ec->next) {
+ int pending;
+
+ if (!ec->claimed || ec->irq == NO_IRQ || ec->slot_no == 8)
+ continue;
+
+ if (ec->ops && ec->ops->irqpending)
+ pending = ec->ops->irqpending(ec);
+ else
+ pending = ecard_default_ops.irqpending(ec);
+
+ if (pending) {
+ struct irqdesc *d = irq_desc + ec->irq;
+ d->handle(ec->irq, d, regs);
+			called++;
+ }
+ }
+ desc->chip->unmask(irq);
+
+ if (called == 0)
+ ecard_check_lockup(desc);
+}
+
+#ifdef HAS_EXPMASK
+static unsigned char priority_masks[] =
+{
+ 0xf0, 0xf1, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff
+};
+
+static unsigned char first_set[] =
+{
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00
+};
+
+static void
+ecard_irqexp_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ const unsigned int statusmask = 15;
+ unsigned int status;
+
+ status = __raw_readb(EXPMASK_STATUS) & statusmask;
+ if (status) {
+ unsigned int slot = first_set[status];
+ ecard_t *ec = slot_to_ecard(slot);
+
+ if (ec->claimed) {
+			struct irqdesc *d = irq_desc + ec->irq;
+ /*
+ * this ugly code is so that we can operate a
+			 * prioritising system:
+ *
+ * Card 0 highest priority
+ * Card 1
+ * Card 2
+ * Card 3 lowest priority
+ *
+			 * Serial cards should go in 0/1, ethernet/scsi in 2/3;
+ * otherwise you will lose serial data at high speeds!
+ */
+ d->handle(ec->irq, d, regs);
+ } else {
+ printk(KERN_WARNING "card%d: interrupt from unclaimed "
+ "card???\n", slot);
+ have_expmask &= ~(1 << slot);
+ __raw_writeb(have_expmask, EXPMASK_ENABLE);
+ }
+ } else
+ printk(KERN_WARNING "Wild interrupt from backplane (masks)\n");
+}
+
+static int __init ecard_probeirqhw(void)
+{
+ ecard_t *ec;
+ int found;
+
+ __raw_writeb(0x00, EXPMASK_ENABLE);
+ __raw_writeb(0xff, EXPMASK_STATUS);
+ found = (__raw_readb(EXPMASK_STATUS) & 15) == 0;
+ __raw_writeb(0xff, EXPMASK_ENABLE);
+
+ if (found) {
+ printk(KERN_DEBUG "Expansion card interrupt "
+ "management hardware found\n");
+
+ /* for each card present, set a bit to '1' */
+ have_expmask = 0x80000000;
+
+ for (ec = cards; ec; ec = ec->next)
+ have_expmask |= 1 << ec->slot_no;
+
+ __raw_writeb(have_expmask, EXPMASK_ENABLE);
+ }
+
+ return found;
+}
+#else
+#define ecard_irqexp_handler NULL
+#define ecard_probeirqhw() (0)
+#endif
+
+#ifndef IO_EC_MEMC8_BASE
+#define IO_EC_MEMC8_BASE 0
+#endif
+
+unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
+{
+ unsigned long address = 0;
+ int slot = ec->slot_no;
+
+ if (ec->slot_no == 8)
+ return IO_EC_MEMC8_BASE;
+
+ ectcr &= ~(1 << slot);
+
+ switch (type) {
+ case ECARD_MEMC:
+ if (slot < 4)
+ address = IO_EC_MEMC_BASE + (slot << 12);
+ break;
+
+ case ECARD_IOC:
+ if (slot < 4)
+ address = IO_EC_IOC_BASE + (slot << 12);
+#ifdef IO_EC_IOC4_BASE
+ else
+ address = IO_EC_IOC4_BASE + ((slot - 4) << 12);
+#endif
+ if (address)
+ address += speed << 17;
+ break;
+
+#ifdef IO_EC_EASI_BASE
+ case ECARD_EASI:
+ address = IO_EC_EASI_BASE + (slot << 22);
+ if (speed == ECARD_FAST)
+ ectcr |= 1 << slot;
+ break;
+#endif
+ default:
+ break;
+ }
+
+#ifdef IOMD_ECTCR
+ iomd_writeb(ectcr, IOMD_ECTCR);
+#endif
+ return address;
+}
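A worked example of the IOC arithmetic above: slots are spaced 4K apart (slot << 12) and the cycle speed selects one of four 128K-spaced aliases of the same slot (speed << 17). The base value below is assumed purely for illustration; the real IO_EC_*_BASE constants come from the platform headers:

	#include <stdio.h>

	#define IO_EC_IOC_BASE 0x03240000UL	/* assumed for this example */

	int main(void)
	{
		unsigned int slot = 2, speed = 1;	/* one of the four cycle speeds */
		unsigned long addr;

		addr = IO_EC_IOC_BASE + (slot << 12) + (speed << 17);
		printf("slot %u, speed %u -> %08lx\n", slot, speed, addr);
		return 0;
	}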
+
+static int ecard_prints(char *buffer, ecard_t *ec)
+{
+ char *start = buffer;
+
+ buffer += sprintf(buffer, " %d: %s ", ec->slot_no,
+ ec->type == ECARD_EASI ? "EASI" : " ");
+
+ if (ec->cid.id == 0) {
+ struct in_chunk_dir incd;
+
+ buffer += sprintf(buffer, "[%04X:%04X] ",
+ ec->cid.manufacturer, ec->cid.product);
+
+ if (!ec->card_desc && ec->cid.cd &&
+ ecard_readchunk(&incd, ec, 0xf5, 0)) {
+ ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);
+
+ if (ec->card_desc)
+ strcpy((char *)ec->card_desc, incd.d.string);
+ }
+
+ buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
+ } else
+ buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id);
+
+ return buffer - start;
+}
+
+static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
+{
+ ecard_t *ec = cards;
+ off_t at = 0;
+ int len, cnt;
+
+ cnt = 0;
+ while (ec && count > cnt) {
+ len = ecard_prints(buf, ec);
+ at += len;
+ if (at >= pos) {
+ if (!*start) {
+ *start = buf + (pos - (at - len));
+ cnt = at - pos;
+ } else
+ cnt += len;
+ buf += len;
+ }
+ ec = ec->next;
+ }
+ return (count > cnt) ? cnt : count;
+}
+
+static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
+
+static void ecard_proc_init(void)
+{
+ proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
+ create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
+ get_ecard_dev_info);
+}
+
+#define ec_set_resource(ec,nr,st,sz) \
+ do { \
+ (ec)->resource[nr].name = ec->dev.bus_id; \
+ (ec)->resource[nr].start = st; \
+ (ec)->resource[nr].end = (st) + (sz) - 1; \
+ (ec)->resource[nr].flags = IORESOURCE_MEM; \
+ } while (0)
+
+static void __init ecard_free_card(struct expansion_card *ec)
+{
+ int i;
+
+ for (i = 0; i < ECARD_NUM_RESOURCES; i++)
+ if (ec->resource[i].flags)
+ release_resource(&ec->resource[i]);
+
+ kfree(ec);
+}
+
+static struct expansion_card *__init ecard_alloc_card(int type, int slot)
+{
+ struct expansion_card *ec;
+ unsigned long base;
+ int i;
+
+ ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
+ if (!ec) {
+ ec = ERR_PTR(-ENOMEM);
+ goto nomem;
+ }
+
+ memset(ec, 0, sizeof(ecard_t));
+
+ ec->slot_no = slot;
+ ec->type = type;
+ ec->irq = NO_IRQ;
+ ec->fiq = NO_IRQ;
+ ec->dma = NO_DMA;
+ ec->ops = &ecard_default_ops;
+
+ snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
+ ec->dev.parent = NULL;
+ ec->dev.bus = &ecard_bus_type;
+ ec->dev.dma_mask = &ec->dma_mask;
+ ec->dma_mask = (u64)0xffffffff;
+
+ if (slot < 4) {
+ ec_set_resource(ec, ECARD_RES_MEMC,
+ PODSLOT_MEMC_BASE + (slot << 14),
+ PODSLOT_MEMC_SIZE);
+ base = PODSLOT_IOC0_BASE + (slot << 14);
+ } else
+ base = PODSLOT_IOC4_BASE + ((slot - 4) << 14);
+
+#ifdef CONFIG_ARCH_RPC
+ if (slot < 8) {
+ ec_set_resource(ec, ECARD_RES_EASI,
+ PODSLOT_EASI_BASE + (slot << 24),
+ PODSLOT_EASI_SIZE);
+ }
+
+ if (slot == 8) {
+ ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE);
+ } else
+#endif
+
+ for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++)
+ ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
+ base + (i << 19), PODSLOT_IOC_SIZE);
+
+ for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
+ if (ec->resource[i].flags &&
+ request_resource(&iomem_resource, &ec->resource[i])) {
+ printk(KERN_ERR "%s: resource(s) not available\n",
+ ec->dev.bus_id);
+ ec->resource[i].end -= ec->resource[i].start;
+ ec->resource[i].start = 0;
+ ec->resource[i].flags = 0;
+ }
+ }
+
+ nomem:
+ return ec;
+}
+
+static ssize_t ecard_show_irq(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ return sprintf(buf, "%u\n", ec->irq);
+}
+
+static ssize_t ecard_show_dma(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ return sprintf(buf, "%u\n", ec->dma);
+}
+
+static ssize_t ecard_show_resources(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ char *str = buf;
+ int i;
+
+ for (i = 0; i < ECARD_NUM_RESOURCES; i++)
+ str += sprintf(str, "%08lx %08lx %08lx\n",
+ ec->resource[i].start,
+ ec->resource[i].end,
+ ec->resource[i].flags);
+
+ return str - buf;
+}
+
+static ssize_t ecard_show_vendor(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ return sprintf(buf, "%u\n", ec->cid.manufacturer);
+}
+
+static ssize_t ecard_show_device(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ return sprintf(buf, "%u\n", ec->cid.product);
+}
+
+static ssize_t ecard_show_type(struct device *dev, char *buf)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ return sprintf(buf, "%s\n", ec->type == ECARD_EASI ? "EASI" : "IOC");
+}
+
+static struct device_attribute ecard_dev_attrs[] = {
+ __ATTR(device, S_IRUGO, ecard_show_device, NULL),
+ __ATTR(dma, S_IRUGO, ecard_show_dma, NULL),
+ __ATTR(irq, S_IRUGO, ecard_show_irq, NULL),
+ __ATTR(resource, S_IRUGO, ecard_show_resources, NULL),
+ __ATTR(type, S_IRUGO, ecard_show_type, NULL),
+ __ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL),
+ __ATTR_NULL,
+};
+
+
+int ecard_request_resources(struct expansion_card *ec)
+{
+ int i, err = 0;
+
+ for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
+ if (ecard_resource_end(ec, i) &&
+ !request_mem_region(ecard_resource_start(ec, i),
+ ecard_resource_len(ec, i),
+ ec->dev.driver->name)) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ if (err) {
+ while (i--)
+ if (ecard_resource_end(ec, i))
+ release_mem_region(ecard_resource_start(ec, i),
+ ecard_resource_len(ec, i));
+ }
+ return err;
+}
+EXPORT_SYMBOL(ecard_request_resources);
+
+void ecard_release_resources(struct expansion_card *ec)
+{
+ int i;
+
+ for (i = 0; i < ECARD_NUM_RESOURCES; i++)
+ if (ecard_resource_end(ec, i))
+ release_mem_region(ecard_resource_start(ec, i),
+ ecard_resource_len(ec, i));
+}
+EXPORT_SYMBOL(ecard_release_resources);
+
+/*
+ * Probe for an expansion card.
+ *
+ * If bit 1 of the first byte of the card is set, then the
+ * card does not exist.
+ */
+static int __init
+ecard_probe(int slot, card_type_t type)
+{
+ ecard_t **ecp;
+ ecard_t *ec;
+ struct ex_ecid cid;
+ int i, rc;
+
+ ec = ecard_alloc_card(type, slot);
+ if (IS_ERR(ec)) {
+ rc = PTR_ERR(ec);
+ goto nomem;
+ }
+
+ rc = -ENODEV;
+ if ((ec->podaddr = ecard_address(ec, type, ECARD_SYNC)) == 0)
+ goto nodev;
+
+ cid.r_zero = 1;
+ ecard_readbytes(&cid, ec, 0, 16, 0);
+ if (cid.r_zero)
+ goto nodev;
+
+ ec->cid.id = cid.r_id;
+ ec->cid.cd = cid.r_cd;
+ ec->cid.is = cid.r_is;
+ ec->cid.w = cid.r_w;
+ ec->cid.manufacturer = ecard_getu16(cid.r_manu);
+ ec->cid.product = ecard_getu16(cid.r_prod);
+ ec->cid.country = cid.r_country;
+ ec->cid.irqmask = cid.r_irqmask;
+ ec->cid.irqoff = ecard_gets24(cid.r_irqoff);
+ ec->cid.fiqmask = cid.r_fiqmask;
+ ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
+ ec->fiqaddr =
+ ec->irqaddr = (void __iomem *)ioaddr(ec->podaddr);
+
+ if (ec->cid.is) {
+ ec->irqmask = ec->cid.irqmask;
+ ec->irqaddr += ec->cid.irqoff;
+ ec->fiqmask = ec->cid.fiqmask;
+ ec->fiqaddr += ec->cid.fiqoff;
+ } else {
+ ec->irqmask = 1;
+ ec->fiqmask = 4;
+ }
+
+ for (i = 0; i < sizeof(blacklist) / sizeof(*blacklist); i++)
+ if (blacklist[i].manufacturer == ec->cid.manufacturer &&
+ blacklist[i].product == ec->cid.product) {
+ ec->card_desc = blacklist[i].type;
+ break;
+ }
+
+ /*
+ * hook the interrupt handlers
+ */
+ if (slot < 8) {
+ ec->irq = 32 + slot;
+ set_irq_chip(ec->irq, &ecard_chip);
+ set_irq_handler(ec->irq, do_level_IRQ);
+ set_irq_flags(ec->irq, IRQF_VALID);
+ }
+
+#ifdef IO_EC_MEMC8_BASE
+ if (slot == 8)
+ ec->irq = 11;
+#endif
+#ifdef CONFIG_ARCH_RPC
+ /* On RiscPC, only first two slots have DMA capability */
+ if (slot < 2)
+ ec->dma = 2 + slot;
+#endif
+
+ for (ecp = &cards; *ecp; ecp = &(*ecp)->next);
+
+ *ecp = ec;
+ slot_to_expcard[slot] = ec;
+
+ device_register(&ec->dev);
+
+ return 0;
+
+ nodev:
+ ecard_free_card(ec);
+ nomem:
+ return rc;
+}
+
+/*
+ * Initialise the expansion card system.
+ * Locate all hardware - interrupt management and
+ * actual cards.
+ */
+static int __init ecard_init(void)
+{
+ int slot, irqhw, ret;
+
+ ret = kernel_thread(ecard_task, NULL, CLONE_KERNEL);
+ if (ret < 0) {
+ printk(KERN_ERR "Ecard: unable to create kernel thread: %d\n",
+ ret);
+ return ret;
+ }
+
+ printk("Probing expansion cards\n");
+
+ for (slot = 0; slot < 8; slot ++) {
+ if (ecard_probe(slot, ECARD_EASI) == -ENODEV)
+ ecard_probe(slot, ECARD_IOC);
+ }
+
+#ifdef IO_EC_MEMC8_BASE
+ ecard_probe(8, ECARD_IOC);
+#endif
+
+ irqhw = ecard_probeirqhw();
+
+ set_irq_chained_handler(IRQ_EXPANSIONCARD,
+ irqhw ? ecard_irqexp_handler : ecard_irq_handler);
+
+ ecard_proc_init();
+
+ return 0;
+}
+
+subsys_initcall(ecard_init);
+
+/*
+ * ECARD "bus"
+ */
+static const struct ecard_id *
+ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
+{
+ int i;
+
+ for (i = 0; ids[i].manufacturer != 65535; i++)
+ if (ec->cid.manufacturer == ids[i].manufacturer &&
+ ec->cid.product == ids[i].product)
+ return ids + i;
+
+ return NULL;
+}
+
+static int ecard_drv_probe(struct device *dev)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct ecard_driver *drv = ECARD_DRV(dev->driver);
+ const struct ecard_id *id;
+ int ret;
+
+ id = ecard_match_device(drv->id_table, ec);
+
+ ecard_claim(ec);
+ ret = drv->probe(ec, id);
+ if (ret)
+ ecard_release(ec);
+ return ret;
+}
+
+static int ecard_drv_remove(struct device *dev)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct ecard_driver *drv = ECARD_DRV(dev->driver);
+
+ drv->remove(ec);
+ ecard_release(ec);
+
+ return 0;
+}
+
+/*
+ * Before rebooting, we must make sure that the expansion card is in a
+ * sensible state, so it can be re-detected. This means that the first
+ * page of the ROM must be visible. We call the expansion card's reset
+ * handler, if any.
+ */
+static void ecard_drv_shutdown(struct device *dev)
+{
+ struct expansion_card *ec = ECARD_DEV(dev);
+ struct ecard_driver *drv = ECARD_DRV(dev->driver);
+ struct ecard_request req;
+
+ if (drv->shutdown)
+ drv->shutdown(ec);
+ ecard_release(ec);
+
+ /*
+ * If this card has a loader, call the reset handler.
+ */
+ if (ec->loader) {
+ req.fn = ecard_task_reset;
+ req.ec = ec;
+ ecard_call(&req);
+ }
+}
+
+int ecard_register_driver(struct ecard_driver *drv)
+{
+ drv->drv.bus = &ecard_bus_type;
+ drv->drv.probe = ecard_drv_probe;
+ drv->drv.remove = ecard_drv_remove;
+ drv->drv.shutdown = ecard_drv_shutdown;
+
+ return driver_register(&drv->drv);
+}
+
+void ecard_remove_driver(struct ecard_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+
+static int ecard_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct expansion_card *ec = ECARD_DEV(_dev);
+ struct ecard_driver *drv = ECARD_DRV(_drv);
+ int ret;
+
+ if (drv->id_table) {
+ ret = ecard_match_device(drv->id_table, ec) != NULL;
+ } else {
+ ret = ec->cid.id == drv->id;
+ }
+
+ return ret;
+}
+
+struct bus_type ecard_bus_type = {
+ .name = "ecard",
+ .dev_attrs = ecard_dev_attrs,
+ .match = ecard_match,
+};
+
+static int ecard_bus_init(void)
+{
+ return bus_register(&ecard_bus_type);
+}
+
+postcore_initcall(ecard_bus_init);
+
+EXPORT_SYMBOL(ecard_readchunk);
+EXPORT_SYMBOL(__ecard_address);
+EXPORT_SYMBOL(ecard_register_driver);
+EXPORT_SYMBOL(ecard_remove_driver);
+EXPORT_SYMBOL(ecard_bus_type);
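A hedged sketch of a client of this bus: a minimal expansion card driver. The manufacturer/product IDs and callbacks are entirely made up; note that the id_table terminator uses manufacturer 65535, which is what ecard_match_device() above looks for:

	static int mycard_probe(struct expansion_card *ec,
				const struct ecard_id *id)
	{
		/* map resources, register the device proper, etc. */
		return 0;
	}

	static void mycard_remove(struct expansion_card *ec)
	{
	}

	static const struct ecard_id mycard_ids[] = {
		{ 0x0011, 0x0022 },		/* hypothetical IDs */
		{ 65535, 65535 }		/* terminator */
	};

	static struct ecard_driver mycard_driver = {
		.probe	  = mycard_probe,
		.remove	  = mycard_remove,
		.id_table = mycard_ids,
		.drv	  = { .name = "mycard" },
	};

	static int __init mycard_init(void)
	{
		return ecard_register_driver(&mycard_driver);
	}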
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
new file mode 100644
index 00000000000..bb27c317d94
--- /dev/null
+++ b/arch/arm/kernel/entry-armv.S
@@ -0,0 +1,745 @@
+/*
+ * linux/arch/arm/kernel/entry-armv.S
+ *
+ * Copyright (C) 1996,1997,1998 Russell King.
+ * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines
+ *
+ * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
+ * it to save wrong values... Be aware!
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <asm/thread_info.h>
+#include <asm/glue.h>
+#include <asm/ptrace.h>
+#include <asm/vfpmacros.h>
+
+#include "entry-header.S"
+
+/*
+ * Invalid mode handlers
+ */
+ .macro inv_entry, sym, reason
+ sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - lr} @ Save XXX r0 - lr
+ ldr r4, .LC\sym
+ mov r1, #\reason
+ .endm
+
+__pabt_invalid:
+ inv_entry abt, BAD_PREFETCH
+ b 1f
+
+__dabt_invalid:
+ inv_entry abt, BAD_DATA
+ b 1f
+
+__irq_invalid:
+ inv_entry irq, BAD_IRQ
+ b 1f
+
+__und_invalid:
+ inv_entry und, BAD_UNDEFINSTR
+
+1: zero_fp
+ ldmia r4, {r5 - r7} @ Get XXX pc, cpsr, old_r0
+ add r4, sp, #S_PC
+ stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0
+ mov r0, sp
+ and r2, r6, #31 @ int mode
+ b bad_mode
+
+/*
+ * SVC mode handlers
+ */
+ .macro svc_entry, sym
+ sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ ldr r2, .LC\sym
+ add r0, sp, #S_FRAME_SIZE
+ ldmia r2, {r2 - r4} @ get pc, cpsr
+ add r5, sp, #S_SP
+ mov r1, lr
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r0 - sp_svc
+ @ r1 - lr_svc
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - spsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ stmia r5, {r0 - r4}
+ .endm
+
+ .align 5
+__dabt_svc:
+ svc_entry abt
+
+ @
+ @ get ready to re-enable interrupts if appropriate
+ @
+ mrs r9, cpsr
+ tst r3, #PSR_I_BIT
+ biceq r9, r9, #PSR_I_BIT
+
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context cpsr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1. r9 must be preserved.
+ @
+#ifdef MULTI_ABORT
+ ldr r4, .LCprocfns
+ mov lr, pc
+ ldr pc, [r4]
+#else
+ bl CPU_ABORT_HANDLER
+#endif
+
+ @
+ @ set desired IRQ state, then call main handler
+ @
+ msr cpsr_c, r9
+ mov r2, sp
+ bl do_DataAbort
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore SPSR and restart the instruction
+ @
+ ldr r0, [sp, #S_PSR]
+ msr spsr_cxsf, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+ .align 5
+__irq_svc:
+ svc_entry irq
+#ifdef CONFIG_PREEMPT
+ get_thread_info r8
+ ldr r9, [r8, #TI_PREEMPT] @ get preempt count
+ add r7, r9, #1 @ increment it
+ str r7, [r8, #TI_PREEMPT]
+#endif
+1: get_irqnr_and_base r0, r6, r5, lr
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrne lr, 1b
+ bne asm_do_IRQ
+#ifdef CONFIG_PREEMPT
+ ldr r0, [r8, #TI_FLAGS] @ get flags
+ tst r0, #_TIF_NEED_RESCHED
+ blne svc_preempt
+preempt_return:
+ ldr r0, [r8, #TI_PREEMPT] @ read preempt value
+ teq r0, r7
+ str r9, [r8, #TI_PREEMPT] @ restore preempt count
+ strne r0, [r0, -r0] @ bug()
+#endif
+ ldr r0, [sp, #S_PSR] @ irqs are already disabled
+ msr spsr_cxsf, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+ .ltorg
+
+#ifdef CONFIG_PREEMPT
+svc_preempt:
+ teq r9, #0 @ was preempt count = 0
+ ldreq r6, .LCirq_stat
+ movne pc, lr @ no
+ ldr r0, [r6, #4] @ local_irq_count
+ ldr r1, [r6, #8] @ local_bh_count
+ adds r0, r0, r1
+ movne pc, lr
+ mov r7, #0 @ preempt_schedule_irq
+ str r7, [r8, #TI_PREEMPT] @ expects preempt_count == 0
+1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
+ beq preempt_return @ go again
+ b 1b
+#endif
+
+ .align 5
+__und_svc:
+ svc_entry und
+
+ @
+ @ call emulation code, which returns using r9 if it has emulated
+ @ the instruction, or the more conventional lr if we are to treat
+ @ this as a real undefined instruction
+ @
+ @ r0 - instruction
+ @
+ ldr r0, [r2, #-4]
+ adr r9, 1f
+ bl call_fpe
+
+ mov r0, sp @ struct pt_regs *regs
+ bl do_undefinstr
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+1: disable_irq r0
+
+ @
+ @ restore SPSR and restart the instruction
+ @
+ ldr lr, [sp, #S_PSR] @ Get SVC cpsr
+ msr spsr_cxsf, lr
+ ldmia sp, {r0 - pc}^ @ Restore SVC registers
+
+ .align 5
+__pabt_svc:
+ svc_entry abt
+
+ @
+ @ re-enable interrupts if appropriate
+ @
+ mrs r9, cpsr
+ tst r3, #PSR_I_BIT
+ biceq r9, r9, #PSR_I_BIT
+ msr cpsr_c, r9
+
+ @
+ @ set args, then call main handler
+ @
+ @ r0 - address of faulting instruction
+ @ r1 - pointer to registers on stack
+ @
+ mov r0, r2 @ address (pc)
+ mov r1, sp @ regs
+ bl do_PrefetchAbort @ call abort handler
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore SPSR and restart the instruction
+ @
+ ldr r0, [sp, #S_PSR]
+ msr spsr_cxsf, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+ .align 5
+.LCirq:
+ .word __temp_irq
+.LCund:
+ .word __temp_und
+.LCabt:
+ .word __temp_abt
+#ifdef MULTI_ABORT
+.LCprocfns:
+ .word processor
+#endif
+.LCfp:
+ .word fp_enter
+#ifdef CONFIG_PREEMPT
+.LCirq_stat:
+ .word irq_stat
+#endif
+
+/*
+ * User mode handlers
+ */
+ .macro usr_entry, sym
+ sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ save r0 - r12
+ ldr r7, .LC\sym
+ add r5, sp, #S_PC
+ ldmia r7, {r2 - r4} @ Get USR pc, cpsr
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - spsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ @ Also, separately save sp_usr and lr_usr
+ @
+ stmia r5, {r2 - r4}
+ stmdb r5, {sp, lr}^
+
+ @
+ @ Enable the alignment trap while in kernel mode
+ @
+ alignment_trap r7, r0, __temp_\sym
+
+ @
+ @ Clear FP to mark the first stack frame
+ @
+ zero_fp
+ .endm
+
+ .align 5
+__dabt_usr:
+ usr_entry abt
+
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context cpsr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1.
+ @
+#ifdef MULTI_ABORT
+ ldr r4, .LCprocfns
+ mov lr, pc
+ ldr pc, [r4]
+#else
+ bl CPU_ABORT_HANDLER
+#endif
+
+ @
+ @ IRQs on, then call the main handler
+ @
+ enable_irq r2
+ mov r2, sp
+ adr lr, ret_from_exception
+ b do_DataAbort
+
+ .align 5
+__irq_usr:
+ usr_entry irq
+
+#ifdef CONFIG_PREEMPT
+ get_thread_info r8
+ ldr r9, [r8, #TI_PREEMPT] @ get preempt count
+ add r7, r9, #1 @ increment it
+ str r7, [r8, #TI_PREEMPT]
+#endif
+1: get_irqnr_and_base r0, r6, r5, lr
+ movne r1, sp
+ adrne lr, 1b
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ bne asm_do_IRQ
+#ifdef CONFIG_PREEMPT
+ ldr r0, [r8, #TI_PREEMPT]
+ teq r0, r7
+ str r9, [r8, #TI_PREEMPT]
+ strne r0, [r0, -r0]
+ mov tsk, r8
+#else
+ get_thread_info tsk
+#endif
+ mov why, #0
+ b ret_to_user
+
+ .ltorg
+
+ .align 5
+__und_usr:
+ usr_entry und
+
+ tst r3, #PSR_T_BIT @ Thumb mode?
+ bne fpundefinstr @ ignore FP
+ sub r4, r2, #4
+
+ @
+ @ fall through to the emulation code, which returns using r9 if
+ @ it has emulated the instruction, or the more conventional lr
+ @ if we are to treat this as a real undefined instruction
+ @
+ @ r0 - instruction
+ @
+1: ldrt r0, [r4]
+ adr r9, ret_from_exception
+ adr lr, fpundefinstr
+ @
+ @ fallthrough to call_fpe
+ @
+
+/*
+ * The out of line fixup for the ldrt above.
+ */
+ .section .fixup, "ax"
+2: mov pc, r9
+ .previous
+ .section __ex_table,"a"
+ .long 1b, 2b
+ .previous
+
+/*
+ * Check whether the instruction is a co-processor instruction.
+ * If yes, we need to call the relevant co-processor handler.
+ *
+ * Note that we don't do a full check here for the co-processor
+ * instructions; all instructions with bit 27 set are well
+ * defined. The only instructions that should fault are the
+ * co-processor instructions. However, we have to watch out
+ * for the ARM6/ARM7 SWI bug.
+ *
+ * Emulators may wish to make use of the following registers:
+ * r0 = instruction opcode.
+ * r2 = PC+4
+ *  r10 = this thread's thread_info structure.
+ */
+call_fpe:
+ tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
+#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+ and r8, r0, #0x0f000000 @ mask out op-code bits
+ teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)?
+#endif
+ moveq pc, lr
+ get_thread_info r10 @ get current thread
+ and r8, r0, #0x00000f00 @ mask out CP number
+ mov r7, #1
+ add r6, r10, #TI_USED_CP
+ strb r7, [r6, r8, lsr #8] @ set appropriate used_cp[]
+#ifdef CONFIG_IWMMXT
+ @ Test if we need to give access to iWMMXt coprocessors
+ ldr r5, [r10, #TI_FLAGS]
+ rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
+ movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+ bcs iwmmxt_task_enable
+#endif
+ enable_irq r7
+ add pc, pc, r8, lsr #6
+ mov r0, r0
+
+ mov pc, lr @ CP#0
+ b do_fpe @ CP#1 (FPE)
+ b do_fpe @ CP#2 (FPE)
+ mov pc, lr @ CP#3
+ mov pc, lr @ CP#4
+ mov pc, lr @ CP#5
+ mov pc, lr @ CP#6
+ mov pc, lr @ CP#7
+ mov pc, lr @ CP#8
+ mov pc, lr @ CP#9
+#ifdef CONFIG_VFP
+ b do_vfp @ CP#10 (VFP)
+ b do_vfp @ CP#11 (VFP)
+#else
+ mov pc, lr @ CP#10 (VFP)
+ mov pc, lr @ CP#11 (VFP)
+#endif
+ mov pc, lr @ CP#12
+ mov pc, lr @ CP#13
+ mov pc, lr @ CP#14 (Debug)
+ mov pc, lr @ CP#15 (Control)
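
The jump table above is indexed by coprocessor number: bits 8-11 of the faulting opcode are already isolated in r8, and "add pc, pc, r8, lsr #6" turns them into a word offset into the table (the pipeline reads pc as the address of the add plus 8, which the padding "mov r0, r0" accounts for). A hedged, purely illustrative C sketch of the same arithmetic:

    /*
     * Illustrative sketch of the call_fpe dispatch arithmetic above:
     * each slot in the branch table is one 4-byte instruction, so the
     * coprocessor number in opcode bits 8-11 maps to a byte offset of
     * cp * 4, i.e. (opcode & 0xf00) >> 6.
     */
    unsigned int cp_slot_offset(unsigned int opcode)
    {
        unsigned int cp = (opcode & 0x00000f00) >> 8;   /* CP number, 0-15 */
        return cp << 2;                                 /* byte offset into the table */
    }
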
+
+do_fpe:
+ ldr r4, .LCfp
+ add r10, r10, #TI_FPSTATE @ r10 = workspace
+ ldr pc, [r4] @ Call FP module USR entry point
+
+/*
+ * The FP module is called with these registers set:
+ * r0 = instruction
+ * r2 = PC+4
+ * r9 = normal "successful" return address
+ * r10 = FP workspace
+ * lr = unrecognised FP instruction return address
+ */
+
+ .data
+ENTRY(fp_enter)
+ .word fpundefinstr
+ .text
+
+fpundefinstr:
+ mov r0, sp
+ adr lr, ret_from_exception
+ b do_undefinstr
+
+ .align 5
+__pabt_usr:
+ usr_entry abt
+
+ enable_irq r0 @ Enable interrupts
+ mov r0, r2 @ address (pc)
+ mov r1, sp @ regs
+ bl do_PrefetchAbort @ call abort handler
+ /* fall through */
+/*
+ * This is the return code to user mode for abort handlers
+ */
+ENTRY(ret_from_exception)
+ get_thread_info tsk
+ mov why, #0
+ b ret_to_user
+
+/*
+ * Register switch for ARMv3 and ARMv4 processors
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+ add ip, r1, #TI_CPU_SAVE
+ ldr r3, [r2, #TI_TP_VALUE]
+ stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
+ ldr r6, [r2, #TI_CPU_DOMAIN]!
+#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
+ mra r4, r5, acc0
+ stmia ip, {r4, r5}
+#endif
+ mov r4, #0xffff0fff
+ str r3, [r4, #-3] @ Set TLS ptr
+ mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+#ifdef CONFIG_VFP
+ @ Always disable VFP so we can lazily save/restore the old
+ @ state. This occurs in the context of the previous thread.
+ VFPFMRX r4, FPEXC
+ bic r4, r4, #FPEXC_ENABLE
+ VFPFMXR FPEXC, r4
+#endif
+#if defined(CONFIG_IWMMXT)
+ bl iwmmxt_task_switch
+#elif defined(CONFIG_CPU_XSCALE)
+ add r4, r2, #40 @ cpu_context_save->extra
+ ldmib r4, {r4, r5}
+ mar acc0, r4, r5
+#endif
+ ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+
+ __INIT
+/*
+ * Vector stubs.
+ *
+ * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's.
+ *
+ * Common stub entry macro:
+ * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+ .macro vector_stub, name, sym, correction=0
+ .align 5
+
+vector_\name:
+ ldr r13, .LCs\sym
+ .if \correction
+ sub lr, lr, #\correction
+ .endif
+ str lr, [r13] @ save lr_IRQ
+ mrs lr, spsr
+ str lr, [r13, #4] @ save spsr_IRQ
+ @
+ @ now branch to the relevant MODE handling routine
+ @
+ mrs r13, cpsr
+ bic r13, r13, #MODE_MASK
+ orr r13, r13, #MODE_SVC
+ msr spsr_cxsf, r13 @ switch to SVC_32 mode
+
+ and lr, lr, #15
+ ldr lr, [pc, lr, lsl #2]
+ movs pc, lr @ Changes mode and branches
+ .endm
+
+__stubs_start:
+/*
+ * Interrupt dispatcher
+ */
+ vector_stub irq, irq, 4
+
+ .long __irq_usr @ 0 (USR_26 / USR_32)
+ .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
+ .long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
+ .long __irq_svc @ 3 (SVC_26 / SVC_32)
+ .long __irq_invalid @ 4
+ .long __irq_invalid @ 5
+ .long __irq_invalid @ 6
+ .long __irq_invalid @ 7
+ .long __irq_invalid @ 8
+ .long __irq_invalid @ 9
+ .long __irq_invalid @ a
+ .long __irq_invalid @ b
+ .long __irq_invalid @ c
+ .long __irq_invalid @ d
+ .long __irq_invalid @ e
+ .long __irq_invalid @ f
+
+/*
+ * Data abort dispatcher
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+ vector_stub dabt, abt, 8
+
+ .long __dabt_usr @ 0 (USR_26 / USR_32)
+ .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ .long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ .long __dabt_svc @ 3 (SVC_26 / SVC_32)
+ .long __dabt_invalid @ 4
+ .long __dabt_invalid @ 5
+ .long __dabt_invalid @ 6
+ .long __dabt_invalid @ 7
+ .long __dabt_invalid @ 8
+ .long __dabt_invalid @ 9
+ .long __dabt_invalid @ a
+ .long __dabt_invalid @ b
+ .long __dabt_invalid @ c
+ .long __dabt_invalid @ d
+ .long __dabt_invalid @ e
+ .long __dabt_invalid @ f
+
+/*
+ * Prefetch abort dispatcher
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+ vector_stub pabt, abt, 4
+
+ .long __pabt_usr @ 0 (USR_26 / USR_32)
+ .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ .long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ .long __pabt_svc @ 3 (SVC_26 / SVC_32)
+ .long __pabt_invalid @ 4
+ .long __pabt_invalid @ 5
+ .long __pabt_invalid @ 6
+ .long __pabt_invalid @ 7
+ .long __pabt_invalid @ 8
+ .long __pabt_invalid @ 9
+ .long __pabt_invalid @ a
+ .long __pabt_invalid @ b
+ .long __pabt_invalid @ c
+ .long __pabt_invalid @ d
+ .long __pabt_invalid @ e
+ .long __pabt_invalid @ f
+
+/*
+ * Undef instr entry dispatcher
+ * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+ vector_stub und, und
+
+ .long __und_usr @ 0 (USR_26 / USR_32)
+ .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
+ .long __und_invalid @ 2 (IRQ_26 / IRQ_32)
+ .long __und_svc @ 3 (SVC_26 / SVC_32)
+ .long __und_invalid @ 4
+ .long __und_invalid @ 5
+ .long __und_invalid @ 6
+ .long __und_invalid @ 7
+ .long __und_invalid @ 8
+ .long __und_invalid @ 9
+ .long __und_invalid @ a
+ .long __und_invalid @ b
+ .long __und_invalid @ c
+ .long __und_invalid @ d
+ .long __und_invalid @ e
+ .long __und_invalid @ f
+
+ .align 5
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+ * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
+ * Basically to switch modes, we *HAVE* to clobber one register... brain
+ * damage alert! I don't think that we can execute any code in here in any
+ * other mode than FIQ... Ok you can switch to another mode, but you can't
+ * get out of that mode without clobbering one register.
+ */
+vector_fiq:
+ disable_fiq
+ subs pc, lr, #4
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*
+ * We group all the following data together to optimise
+ * for CPUs with separate I & D caches.
+ */
+ .align 5
+
+.LCvswi:
+ .word vector_swi
+
+.LCsirq:
+ .word __temp_irq
+.LCsund:
+ .word __temp_und
+.LCsabt:
+ .word __temp_abt
+
+__stubs_end:
+
+ .equ __real_stubs_start, .LCvectors + 0x200
+
+.LCvectors:
+ swi SYS_ERROR0
+ b __real_stubs_start + (vector_und - __stubs_start)
+ ldr pc, __real_stubs_start + (.LCvswi - __stubs_start)
+ b __real_stubs_start + (vector_pabt - __stubs_start)
+ b __real_stubs_start + (vector_dabt - __stubs_start)
+ b __real_stubs_start + (vector_addrexcptn - __stubs_start)
+ b __real_stubs_start + (vector_irq - __stubs_start)
+ b __real_stubs_start + (vector_fiq - __stubs_start)
+
+ENTRY(__trap_init)
+ stmfd sp!, {r4 - r6, lr}
+
+ mov r0, #0xff000000
+ orr r0, r0, #0x00ff0000 @ high vectors position
+ adr r1, .LCvectors @ set up the vectors
+ ldmia r1, {r1, r2, r3, r4, r5, r6, ip, lr}
+ stmia r0, {r1, r2, r3, r4, r5, r6, ip, lr}
+
+ add r2, r0, #0x200
+ adr r0, __stubs_start @ copy stubs to 0x200
+ adr r1, __stubs_end
+1: ldr r3, [r0], #4
+ str r3, [r2], #4
+ cmp r0, r1
+ blt 1b
+ LOADREGS(fd, sp!, {r4 - r6, pc})
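
In C terms, __trap_init above performs two block copies into the high-vectors page: the eight vector instructions go to 0xffff0000, and the stubs follow at 0xffff0200, where the __real_stubs_start branches computed in .LCvectors expect them. A rough, hedged sketch; the parameter names standing in for .LCvectors and the __stubs_start..__stubs_end range are hypothetical:

    #include <linux/string.h>   /* memcpy */

    /* Hedged C sketch of __trap_init above, not the kernel's code. */
    void trap_init_sketch(const void *vectors_src,
                          const void *stubs_src, unsigned long stubs_len)
    {
        memcpy((void *)0xffff0000, vectors_src, 8 * sizeof(unsigned long));
        memcpy((void *)0xffff0200, stubs_src, stubs_len);
    }
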
+
+ .data
+
+/*
+ * Do not reorder these, and do not insert extra data between...
+ */
+
+__temp_irq:
+ .word 0 @ saved lr_irq
+ .word 0 @ saved spsr_irq
+ .word -1 @ old_r0
+__temp_und:
+ .word 0 @ Saved lr_und
+ .word 0 @ Saved spsr_und
+ .word -1 @ old_r0
+__temp_abt:
+ .word 0 @ Saved lr_abt
+ .word 0 @ Saved spsr_abt
+ .word -1 @ old_r0
+
+ .globl cr_alignment
+ .globl cr_no_alignment
+cr_alignment:
+ .space 4
+cr_no_alignment:
+ .space 4
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
new file mode 100644
index 00000000000..53a7e0dea44
--- /dev/null
+++ b/arch/arm/kernel/entry-common.S
@@ -0,0 +1,260 @@
+/*
+ * linux/arch/arm/kernel/entry-common.S
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+
+#include "entry-header.S"
+
+/*
+ * We rely on the fact that R0 is at the bottom of the stack (due to
+ * slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+ .align 5
+/*
+ * This is the fast syscall return path. We do as little as
+ * possible here, and this includes saving r0 back into the SVC
+ * stack.
+ */
+ret_fast_syscall:
+ disable_irq r1 @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS]
+ tst r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+ fast_restore_user_regs
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+fast_work_pending:
+ str r0, [sp, #S_R0+S_OFF]! @ returned r0
+work_pending:
+ tst r1, #_TIF_NEED_RESCHED
+ bne work_resched
+ tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
+ beq no_work_pending
+ mov r0, sp @ 'regs'
+ mov r2, why @ 'syscall'
+ bl do_notify_resume
+ disable_irq r1 @ disable interrupts
+ b no_work_pending
+
+work_resched:
+ bl schedule
+/*
+ * "slow" syscall return path. "why" tells us if this was a real syscall.
+ */
+ENTRY(ret_to_user)
+ret_slow_syscall:
+ disable_irq r1 @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS]
+ tst r1, #_TIF_WORK_MASK
+ bne work_pending
+no_work_pending:
+ slow_restore_user_regs
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ bl schedule_tail
+ get_thread_info tsk
+ ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
+ mov why, #1
+ tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ beq ret_slow_syscall
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ bl syscall_trace
+ b ret_slow_syscall
+
+
+#include "calls.S"
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ */
+
+ /* If we're optimising for StrongARM the resulting code won't
+ run on an ARM7 and we can save a couple of instructions.
+ --pb */
+#ifdef CONFIG_CPU_ARM710
+ .macro arm710_bug_check, instr, temp
+ and \temp, \instr, #0x0f000000 @ check for SWI
+ teq \temp, #0x0f000000
+ bne .Larm700bug
+ .endm
+
+.Larm700bug:
+ ldr r0, [sp, #S_PSR] @ Get calling cpsr
+ sub lr, lr, #4
+ str lr, [r8]
+ msr spsr_cxsf, r0
+ ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
+ mov r0, r0
+ ldr lr, [sp, #S_PC] @ Get PC
+ add sp, sp, #S_FRAME_SIZE
+ movs pc, lr
+#else
+ .macro arm710_bug_check, instr, temp
+ .endm
+#endif
+
+ .align 5
+ENTRY(vector_swi)
+ save_user_regs
+ zero_fp
+ get_scno
+ arm710_bug_check scno, ip
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldr ip, __cr_alignment
+ ldr ip, [ip]
+ mcr p15, 0, ip, c1, c0 @ update control register
+#endif
+ enable_irq ip
+
+ str r4, [sp, #-S_OFF]! @ push fifth arg
+
+ get_thread_info tsk
+ ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
+ bic scno, scno, #0xff000000 @ mask off SWI op-code
+ eor scno, scno, #OS_NUMBER << 20 @ check OS number
+ adr tbl, sys_call_table @ load syscall table pointer
+ tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ bne __sys_trace
+
+ adr lr, ret_fast_syscall @ return address
+ cmp scno, #NR_syscalls @ check upper syscall limit
+ ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+
+ add r1, sp, #S_OFF
+2: mov why, #0 @ no longer a real syscall
+ cmp scno, #ARMSWI_OFFSET
+ eor r0, scno, #OS_NUMBER << 20 @ put OS number back
+ bcs arm_syscall
+ b sys_ni_syscall @ not private func
+
+ /*
+ * This is the really slow path. We're going to be doing
+ * context switches, and waiting for our parent to respond.
+ */
+__sys_trace:
+ add r1, sp, #S_OFF
+ mov r0, #0 @ trace entry [IP = 0]
+ bl syscall_trace
+
+ adr lr, __sys_trace_return @ return address
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ cmp scno, #NR_syscalls @ check upper syscall limit
+ ldmccia r1, {r0 - r3} @ have to reload r0 - r3
+ ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+ b 2b
+
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ bl syscall_trace
+ b ret_slow_syscall
+
+ .align 5
+#ifdef CONFIG_ALIGNMENT_TRAP
+ .type __cr_alignment, #object
+__cr_alignment:
+ .word cr_alignment
+#endif
+
+ .type sys_call_table, #object
+ENTRY(sys_call_table)
+#include "calls.S"
+
+/*============================================================================
+ * Special system call wrappers
+ */
+@ r0 = syscall number
+@ r5 = syscall table
+ .type sys_syscall, #function
+sys_syscall:
+ eor scno, r0, #OS_NUMBER << 20
+ cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
+ cmpne scno, #NR_syscalls @ check range
+ stmloia sp, {r5, r6} @ shuffle args
+ movlo r0, r1
+ movlo r1, r2
+ movlo r2, r3
+ movlo r3, r4
+ ldrlo pc, [tbl, scno, lsl #2]
+ b sys_ni_syscall
+
+sys_fork_wrapper:
+ add r0, sp, #S_OFF
+ b sys_fork
+
+sys_vfork_wrapper:
+ add r0, sp, #S_OFF
+ b sys_vfork
+
+sys_execve_wrapper:
+ add r3, sp, #S_OFF
+ b sys_execve
+
+sys_clone_wrapper:
+ add ip, sp, #S_OFF
+ str ip, [sp, #4]
+ b sys_clone
+
+sys_sigsuspend_wrapper:
+ add r3, sp, #S_OFF
+ b sys_sigsuspend
+
+sys_rt_sigsuspend_wrapper:
+ add r2, sp, #S_OFF
+ b sys_rt_sigsuspend
+
+sys_sigreturn_wrapper:
+ add r0, sp, #S_OFF
+ b sys_sigreturn
+
+sys_rt_sigreturn_wrapper:
+ add r0, sp, #S_OFF
+ b sys_rt_sigreturn
+
+sys_sigaltstack_wrapper:
+ ldr r2, [sp, #S_OFF + S_SP]
+ b do_sigaltstack
+
+sys_futex_wrapper:
+ str r5, [sp, #4] @ push sixth arg
+ b sys_futex
+
+/*
+ * Note: off_4k (r5) is always in units of 4K. If we can't do the
+ * requested offset, we return -EINVAL.
+ */
+sys_mmap2:
+#if PAGE_SHIFT > 12
+ tst r5, #PGOFF_MASK
+ moveq r5, r5, lsr #PAGE_SHIFT - 12
+ streq r5, [sp, #4]
+ beq do_mmap2
+ mov r0, #-EINVAL
+ RETINSTR(mov,pc, lr)
+#else
+ str r5, [sp, #4]
+ b do_mmap2
+#endif
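
The shim above is easier to follow in C: userspace always passes the file offset in 4K units, and when the kernel page size is larger than 4K the offset must convert to a whole number of pages or the call fails. A hedged sketch; the do_mmap2 call shape is abbreviated and the other arguments are elided:

    /*
     * Hedged C rendering of the sys_mmap2 wrapper above; only the
     * offset handling is shown. PAGE_SHIFT and EINVAL come from the
     * usual kernel headers.
     */
    long sys_mmap2_sketch(unsigned long off_4k /* , ... */)
    {
    #if PAGE_SHIFT > 12
        if (off_4k & ((1UL << (PAGE_SHIFT - 12)) - 1))
            return -EINVAL;                 /* not a whole page */
        off_4k >>= PAGE_SHIFT - 12;         /* 4K units -> page units */
    #endif
        return do_mmap2(/* ..., */ off_4k); /* pgoff now in PAGE_SIZE units */
    }
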
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
new file mode 100644
index 00000000000..4039d8c120b
--- /dev/null
+++ b/arch/arm/kernel/entry-header.S
@@ -0,0 +1,182 @@
+#include <linux/config.h> /* for CONFIG_ARCH_xxxx */
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/constants.h>
+#include <asm/errno.h>
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
+#include <asm/arch/entry-macro.S>
+
+#ifndef MODE_SVC
+#define MODE_SVC 0x13
+#endif
+
+ .macro zero_fp
+#ifdef CONFIG_FRAME_POINTER
+ mov fp, #0
+#endif
+ .endm
+
+ .text
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH 0
+#define BAD_DATA 1
+#define BAD_ADDREXCPTN 2
+#define BAD_IRQ 3
+#define BAD_UNDEFINSTR 4
+
+#define PT_TRACESYS 0x00000002
+
+@ OS version number used in SWIs
+@ RISC OS is 0
+@ RISC iX is 8
+@
+#define OS_NUMBER 9
+#define ARMSWI_OFFSET 0x000f0000
+
+@
+@ Stack format (ensured by USER_* and SVC_*)
+@
+#define S_FRAME_SIZE 72
+#define S_OLD_R0 68
+#define S_PSR 64
+
+#define S_PC 60
+#define S_LR 56
+#define S_SP 52
+#define S_IP 48
+#define S_FP 44
+#define S_R10 40
+#define S_R9 36
+#define S_R8 32
+#define S_R7 28
+#define S_R6 24
+#define S_R5 20
+#define S_R4 16
+#define S_R3 12
+#define S_R2 8
+#define S_R1 4
+#define S_R0 0
+#define S_OFF 8
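
These offsets index the register frame built by save_user_regs and friends, mirroring the pt_regs layout in include/asm-arm/ptrace.h that the earlier "orig_r0" comment refers to. A sketch of the layout they describe; the struct and field names here are illustrative:

    /*
     * Illustrative layout behind the S_* offsets: 18 words, 72 bytes
     * (S_FRAME_SIZE). S_OFF is the extra 8 bytes pushed below the
     * frame for the fifth and sixth syscall arguments.
     */
    struct saved_regs_sketch {
        unsigned long r[13];    /* S_R0..S_IP: r0-r12, offsets 0-48 */
        unsigned long sp;       /* S_SP, offset 52 */
        unsigned long lr;       /* S_LR, offset 56 */
        unsigned long pc;       /* S_PC, offset 60 */
        unsigned long psr;      /* S_PSR, offset 64 */
        unsigned long old_r0;   /* S_OLD_R0, offset 68 */
    };
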
+
+ .macro set_cpsr_c, reg, mode
+ msr cpsr_c, \mode
+ .endm
+
+#if __LINUX_ARM_ARCH__ >= 6
+ .macro disable_irq, temp
+ cpsid i
+ .endm
+
+ .macro enable_irq, temp
+ cpsie i
+ .endm
+#else
+ .macro disable_irq, temp
+ set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
+ .endm
+
+ .macro enable_irq, temp
+ set_cpsr_c \temp, #MODE_SVC
+ .endm
+#endif
+
+ .macro save_user_regs
+ sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ Calling r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Calling sp, lr
+ mrs r8, spsr @ called from non-FIQ mode, so ok.
+ str lr, [sp, #S_PC] @ Save calling PC
+ str r8, [sp, #S_PSR] @ Save CPSR
+ str r0, [sp, #S_OLD_R0] @ Save OLD_R0
+ .endm
+
+ .macro restore_user_regs
+ ldr r1, [sp, #S_PSR] @ Get calling cpsr
+ disable_irq ip @ disable IRQs
+ ldr lr, [sp, #S_PC]! @ Get PC
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ ldmdb sp, {r0 - lr}^ @ Get calling r0 - lr
+ mov r0, r0
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+/*
+ * Must be called with IRQs already disabled.
+ */
+ .macro fast_restore_user_regs
+ ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
+ ldr lr, [sp, #S_OFF + S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ mov r0, r0
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+/*
+ * Must be called with IRQs already disabled.
+ */
+ .macro slow_restore_user_regs
+ ldr r1, [sp, #S_PSR] @ get calling cpsr
+ ldr lr, [sp, #S_PC]! @ get pc
+ msr spsr_cxsf, r1 @ save in spsr_svc
+ ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ mov r0, r0
+ add sp, sp, #S_FRAME_SIZE - S_PC
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro mask_pc, rd, rm
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
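
get_thread_info relies on kernel stacks being 8K in size and 8K-aligned, with thread_info at the bottom (init_task.c later in this patch makes the same 8192-byte alignment point), so rounding sp down to an 8K boundary recovers the thread_info pointer. The same trick, sketched in C:

    /*
     * Sketch of the get_thread_info masking trick; 8192 == 1 << 13,
     * matching the two 13-bit shifts in the macro above.
     */
    struct thread_info;

    static inline struct thread_info *thread_info_sketch(unsigned long sp)
    {
        return (struct thread_info *)(sp & ~(8192UL - 1));
    }
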
+
+ .macro alignment_trap, rbase, rtemp, sym
+#ifdef CONFIG_ALIGNMENT_TRAP
+#define OFF_CR_ALIGNMENT(x) cr_alignment - x
+
+ ldr \rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+ mcr p15, 0, \rtemp, c1, c0
+#endif
+ .endm
+
+
+/*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+ *
+ * r7 is reserved for the system call number for thumb mode.
+ *
+ * Note that tbl == why is intentional.
+ *
+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.
+ */
+scno .req r7 @ syscall number
+tbl .req r8 @ syscall table pointer
+why .req r8 @ Linux syscall (!= 0)
+tsk .req r9 @ current thread_info
+
+/*
+ * Get the system call number.
+ */
+ .macro get_scno
+#ifdef CONFIG_ARM_THUMB
+ tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
+ addne scno, r7, #OS_NUMBER << 20 @ put OS number in
+ ldreq scno, [lr, #-4]
+
+#else
+ mask_pc lr, lr
+ ldr scno, [lr, #-4] @ get SWI instruction
+#endif
+ .endm
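
get_scno reflects the two calling conventions handled by vector_swi: in Thumb mode the syscall number arrives in r7 (scno) and just needs the OS number ORed in, while in ARM mode it is encoded in the low 24 bits of the SWI instruction itself, re-read from memory at lr - 4. A hedged sketch of the ARM-mode decode:

    /*
     * Sketch of the ARM-mode decode: lr points just past the SWI the
     * task executed; masking off the top byte removes the SWI opcode,
     * as "bic scno, scno, #0xff000000" does in vector_swi.
     */
    unsigned int swi_number_sketch(const unsigned int *lr)
    {
        unsigned int insn = lr[-1];     /* the SWI instruction word */
        return insn & 0x00ffffff;       /* OS number + syscall number */
    }
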
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
new file mode 100644
index 00000000000..9299dfc2569
--- /dev/null
+++ b/arch/arm/kernel/fiq.c
@@ -0,0 +1,181 @@
+/*
+ * linux/arch/arm/kernel/fiq.c
+ *
+ * Copyright (C) 1998 Russell King
+ * Copyright (C) 1998, 1999 Phil Blundell
+ *
+ * FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
+ *
+ * FIQ support re-written by Russell King to be more generic
+ *
+ * We now properly support a method by which the FIQ handlers can
+ * be stacked onto the vector. We still do not support sharing
+ * the FIQ vector itself.
+ *
+ * Operation is as follows:
+ * 1. Owner A claims FIQ:
+ * - default_fiq relinquishes control.
+ * 2. Owner A:
+ * - inserts code,
+ * - sets any registers,
+ * - enables FIQ.
+ * 3. Owner B claims FIQ:
+ * - if owner A has a relinquish function, it:
+ * - disables FIQs,
+ * - saves any registers,
+ * - returns zero.
+ * 4. Owner B:
+ * - inserts code,
+ * - sets any registers,
+ * - enables FIQ.
+ * 5. Owner B releases FIQ:
+ * - owner A is asked to reacquire FIQ:
+ * - inserts code,
+ * - restores saved registers,
+ * - enables FIQ.
+ * 6. Goto 3.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fiq.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+static unsigned long no_fiq_insn;
+
+/* Default reacquire function
+ * - we always relinquish FIQ control
+ * - we always reacquire FIQ control
+ */
+static int fiq_def_op(void *ref, int relinquish)
+{
+ if (!relinquish)
+ set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
+
+ return 0;
+}
+
+static struct fiq_handler default_owner = {
+ .name = "default",
+ .fiq_op = fiq_def_op,
+};
+
+static struct fiq_handler *current_fiq = &default_owner;
+
+int show_fiq_list(struct seq_file *p, void *v)
+{
+ if (current_fiq != &default_owner)
+ seq_printf(p, "FIQ: %s\n", current_fiq->name);
+
+ return 0;
+}
+
+void set_fiq_handler(void *start, unsigned int length)
+{
+ memcpy((void *)0xffff001c, start, length);
+ flush_icache_range(0xffff001c, 0xffff001c + length);
+ if (!vectors_high())
+ flush_icache_range(0x1c, 0x1c + length);
+}
+
+/*
+ * Taking an interrupt in FIQ mode is death, so both these functions
+ * disable irqs for the duration. Note - these functions are almost
+ * entirely coded in assembly.
+ */
+void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
+{
+ register unsigned long tmp;
+ asm volatile (
+ "mov ip, sp\n\
+ stmfd sp!, {fp, ip, lr, pc}\n\
+ sub fp, ip, #4\n\
+ mrs %0, cpsr\n\
+ msr cpsr_c, %2 @ select FIQ mode\n\
+ mov r0, r0\n\
+ ldmia %1, {r8 - r14}\n\
+ msr cpsr_c, %0 @ return to SVC mode\n\
+ mov r0, r0\n\
+ ldmea fp, {fp, sp, pc}"
+ : "=&r" (tmp)
+ : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
+}
+
+void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
+{
+ register unsigned long tmp;
+ asm volatile (
+ "mov ip, sp\n\
+ stmfd sp!, {fp, ip, lr, pc}\n\
+ sub fp, ip, #4\n\
+ mrs %0, cpsr\n\
+ msr cpsr_c, %2 @ select FIQ mode\n\
+ mov r0, r0\n\
+ stmia %1, {r8 - r14}\n\
+ msr cpsr_c, %0 @ return to SVC mode\n\
+ mov r0, r0\n\
+ ldmea fp, {fp, sp, pc}"
+ : "=&r" (tmp)
+ : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
+}
+
+int claim_fiq(struct fiq_handler *f)
+{
+ int ret = 0;
+
+ if (current_fiq) {
+ ret = -EBUSY;
+
+ if (current_fiq->fiq_op != NULL)
+ ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
+ }
+
+ if (!ret) {
+ f->next = current_fiq;
+ current_fiq = f;
+ }
+
+ return ret;
+}
+
+void release_fiq(struct fiq_handler *f)
+{
+ if (current_fiq != f) {
+ printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
+ f->name, current_fiq->name);
+ dump_stack();
+ return;
+ }
+
+ do
+ current_fiq = current_fiq->next;
+ while (current_fiq->fiq_op(current_fiq->dev_id, 0));
+}
+
+void enable_fiq(int fiq)
+{
+ enable_irq(fiq + FIQ_START);
+}
+
+void disable_fiq(int fiq)
+{
+ disable_irq(fiq + FIQ_START);
+}
+
+EXPORT_SYMBOL(set_fiq_handler);
+EXPORT_SYMBOL(set_fiq_regs);
+EXPORT_SYMBOL(get_fiq_regs);
+EXPORT_SYMBOL(claim_fiq);
+EXPORT_SYMBOL(release_fiq);
+EXPORT_SYMBOL(enable_fiq);
+EXPORT_SYMBOL(disable_fiq);
+
+void __init init_FIQ(void)
+{
+ no_fiq_insn = *(unsigned long *)0xffff001c;
+}
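
From a driver's point of view, the claim/insert/enable sequence described in the comment at the top of this file looks roughly like the sketch below. The handler symbols, the FIQ number and the header locations are all hypothetical; only struct fields and functions this file itself defines or reads are used:

    #include <linux/string.h>
    #include <asm/fiq.h>        /* assumed home of the API above */

    /* Hedged usage sketch for the FIQ API; my_fiq_start/end (handler
     * code copied to the vector) and MY_FIQ are hypothetical. */
    static struct fiq_handler my_fiq_owner = {
        .name = "my-device",
    };

    static int my_fiq_setup(void)
    {
        extern unsigned char my_fiq_start[], my_fiq_end[];
        struct pt_regs regs;
        int ret;

        ret = claim_fiq(&my_fiq_owner);
        if (ret)
            return ret;

        set_fiq_handler(my_fiq_start, my_fiq_end - my_fiq_start);
        memset(&regs, 0, sizeof(regs));
        /* preload any FIQ-mode registers (r8-r14) via regs here */
        set_fiq_regs(&regs);
        enable_fiq(MY_FIQ);     /* hypothetical FIQ number */
        return 0;
    }
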
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
new file mode 100644
index 00000000000..171b3e811c7
--- /dev/null
+++ b/arch/arm/kernel/head.S
@@ -0,0 +1,516 @@
+/*
+ * linux/arch/arm/kernel/head.S
+ *
+ * Copyright (C) 1994-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Kernel startup code for all 32-bit CPUs
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/domain.h>
+#include <asm/mach-types.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+#include <asm/constants.h>
+#include <asm/system.h>
+
+#define PROCINFO_MMUFLAGS 8
+#define PROCINFO_INITFUNC 12
+
+#define MACHINFO_TYPE 0
+#define MACHINFO_PHYSRAM 4
+#define MACHINFO_PHYSIO 8
+#define MACHINFO_PGOFFIO 12
+#define MACHINFO_NAME 16
+
+#ifndef CONFIG_XIP_KERNEL
+/*
+ * We place the page tables 16K below TEXTADDR. Therefore, we must make sure
+ * that TEXTADDR is correctly set. Currently, we expect the least significant
+ * 16 bits to be 0x8000, but we could probably relax this restriction to
+ * TEXTADDR >= PAGE_OFFSET + 0x4000
+ *
+ * Note that swapper_pg_dir is the virtual address of the page tables, and
+ * pgtbl gives us a position-independent reference to these tables. We can
+ * do this because stext == TEXTADDR
+ */
+#if (TEXTADDR & 0xffff) != 0x8000
+#error TEXTADDR must start at 0xXXXX8000
+#endif
+
+ .globl swapper_pg_dir
+ .equ swapper_pg_dir, TEXTADDR - 0x4000
+
+ .macro pgtbl, rd, phys
+ adr \rd, stext
+ sub \rd, \rd, #0x4000
+ .endm
+#else
+/*
+ * XIP Kernel:
+ *
+ * We place the page tables 16K below DATAADDR. Therefore, we must make sure
+ * that DATAADDR is correctly set. Currently, we expect the least significant
+ * 16 bits to be 0x8000, but we could probably relax this restriction to
+ * DATAADDR >= PAGE_OFFSET + 0x4000
+ *
+ * Note that pgtbl is meant to return the physical address of swapper_pg_dir.
+ * We can't make it relative to the kernel position in this case since
+ * the kernel can physically be anywhere.
+ */
+#if (DATAADDR & 0xffff) != 0x8000
+#error DATAADDR must start at 0xXXXX8000
+#endif
+
+ .globl swapper_pg_dir
+ .equ swapper_pg_dir, DATAADDR - 0x4000
+
+ .macro pgtbl, rd, phys
+ ldr \rd, =((DATAADDR - 0x4000) - VIRT_OFFSET)
+ add \rd, \rd, \phys
+ .endm
+#endif
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code. The requirements
+ * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * This code is mostly position independent, so if you link the kernel at
+ * 0xc0008000, you call this at __pa(0xc0008000).
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
+ *
+ * We're trying to keep crap to a minimum; DO NOT add any machine specific
+ * crap here - that's what the boot loader (or in extreme, well justified
+ * circumstances, zImage) is for.
+ */
+ __INIT
+ .type stext, %function
+ENTRY(stext)
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
+ @ and irqs disabled
+ bl __lookup_processor_type @ r5=procinfo r9=cpuid
+ movs r10, r5 @ invalid processor (r5=0)?
+ beq __error_p @ yes, error 'p'
+ bl __lookup_machine_type @ r5=machinfo
+ movs r8, r5 @ invalid machine (r5=0)?
+ beq __error_a @ yes, error 'a'
+ bl __create_page_tables
+
+ /*
+ * The following calls CPU specific code in a position independent
+ * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
+ * xxx_proc_info structure selected by __lookup_machine_type
+ * above. On return, the CPU will be ready for the MMU to be
+ * turned on, and r0 will hold the CPU control register value.
+ */
+ ldr r13, __switch_data @ address to jump to after
+ @ mmu has been enabled
+ adr lr, __enable_mmu @ return (PIC) address
+ add pc, r10, #PROCINFO_INITFUNC
+
+ .type __switch_data, %object
+__switch_data:
+ .long __mmap_switched
+ .long __data_loc @ r4
+ .long __data_start @ r5
+ .long __bss_start @ r6
+ .long _end @ r7
+ .long processor_id @ r4
+ .long __machine_arch_type @ r5
+ .long cr_alignment @ r6
+ .long init_thread_union+8192 @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on, and uses
+ * absolute addresses; this is not position independent.
+ *
+ * r0 = cp#15 control register
+ * r1 = machine ID
+ * r9 = processor ID
+ */
+ .type __mmap_switched, %function
+__mmap_switched:
+ adr r3, __switch_data + 4
+
+ ldmia r3!, {r4, r5, r6, r7}
+ cmp r4, r5 @ Copy data segment if needed
+1: cmpne r5, r6
+ ldrne fp, [r4], #4
+ strne fp, [r5], #4
+ bne 1b
+
+ mov fp, #0 @ Clear BSS (and zero fp)
+1: cmp r6, r7
+ strcc fp, [r6],#4
+ bcc 1b
+
+ ldmia r3, {r4, r5, r6, sp}
+ str r9, [r4] @ Save processor ID
+ str r1, [r5] @ Save machine type
+ bic r4, r0, #CR_A @ Clear 'A' bit
+ stmia r6, {r0, r4} @ Save control register values
+ b start_kernel
+
+
+
+/*
+ * Set up common bits before finally enabling the MMU. Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ */
+ .type __enable_mmu, %function
+__enable_mmu:
+#ifdef CONFIG_ALIGNMENT_TRAP
+ orr r0, r0, #CR_A
+#else
+ bic r0, r0, #CR_A
+#endif
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+ bic r0, r0, #CR_C
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+ bic r0, r0, #CR_Z
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+ bic r0, r0, #CR_I
+#endif
+ mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+ mcr p15, 0, r5, c3, c0, 0 @ load domain access register
+ mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
+ b __turn_mmu_on
+
+/*
+ * Enable the MMU. This completely changes the structure of the visible
+ * memory space. You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ *
+ * r0 = cp#15 control register
+ * r13 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+ .align 5
+ .type __turn_mmu_on, %function
+__turn_mmu_on:
+ mov r0, r0
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+ mrc p15, 0, r3, c0, c0, 0 @ read id reg
+ mov r3, r3
+ mov r3, r3
+ mov pc, r13
+
+
+
+/*
+ * Set up the initial page tables. We set up only the bare
+ * minimum required to get the kernel running, which
+ * generally means mapping in the kernel code.
+ *
+ * r8 = machinfo
+ * r9 = cpuid
+ * r10 = procinfo
+ *
+ * Returns:
+ * r0, r3, r5, r6, r7 corrupted
+ * r4 = physical page table address
+ */
+ .type __create_page_tables, %function
+__create_page_tables:
+ ldr r5, [r8, #MACHINFO_PHYSRAM] @ physram
+ pgtbl r4, r5 @ page table address
+
+ /*
+ * Clear the 16K level 1 swapper page table
+ */
+ mov r0, r4
+ mov r3, #0
+ add r6, r0, #0x4000
+1: str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ teq r0, r6
+ bne 1b
+
+ ldr r7, [r10, #PROCINFO_MMUFLAGS] @ mmuflags
+
+ /*
+ * Create identity mapping for first MB of kernel to
+ * cater for the MMU enable. This identity mapping
+ * will be removed by paging_init(). We use our current program
+ * counter to determine corresponding section base address.
+ */
+ mov r6, pc, lsr #20 @ start of kernel section
+ orr r3, r7, r6, lsl #20 @ flags + kernel base
+ str r3, [r4, r6, lsl #2] @ identity mapping
+
+ /*
+ * Now setup the pagetables for our kernel direct
+ * mapped region. We round TEXTADDR down to the
+ * nearest megabyte boundary. It is assumed that
+ * the kernel fits within 4 contiguous 1MB sections.
+ */
+ add r0, r4, #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
+ str r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!
+ add r3, r3, #1 << 20
+ str r3, [r0, #4]! @ KERNEL + 1MB
+ add r3, r3, #1 << 20
+ str r3, [r0, #4]! @ KERNEL + 2MB
+ add r3, r3, #1 << 20
+ str r3, [r0, #4] @ KERNEL + 3MB
+
+ /*
+ * Then map first 1MB of ram in case it contains our boot params.
+ */
+ add r0, r4, #VIRT_OFFSET >> 18
+ orr r6, r5, r7
+ str r6, [r0]
+
+#ifdef CONFIG_XIP_KERNEL
+ /*
+ * Map some ram to cover our .data and .bss areas.
+ * Mapping 3MB should be plenty.
+ */
+ sub r3, r4, r5
+ mov r3, r3, lsr #20
+ add r0, r0, r3, lsl #2
+ add r6, r6, r3, lsl #20
+ str r6, [r0], #4
+ add r6, r6, #(1 << 20)
+ str r6, [r0], #4
+ add r6, r6, #(1 << 20)
+ str r6, [r0]
+#endif
+
+ bic r7, r7, #0x0c @ turn off cacheable
+ @ and bufferable bits
+#ifdef CONFIG_DEBUG_LL
+ /*
+ * Map in IO space for serial debugging.
+ * This allows debug messages to be output
+ * via a serial console before paging_init.
+ */
+ ldr r3, [r8, #MACHINFO_PGOFFIO]
+ add r0, r4, r3
+ rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
+ cmp r3, #0x0800 @ limit to 512MB
+ movhi r3, #0x0800
+ add r6, r0, r3
+ ldr r3, [r8, #MACHINFO_PHYSIO]
+ orr r3, r3, r7
+1: str r3, [r0], #4
+ add r3, r3, #1 << 20
+ teq r0, r6
+ bne 1b
+#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
+ /*
+ * If we're using the NetWinder, we need to map in
+ * the 16550-type serial port for the debug messages
+ */
+ teq r1, #MACH_TYPE_NETWINDER
+ teqne r1, #MACH_TYPE_CATS
+ bne 1f
+ add r0, r4, #0x3fc0 @ ff000000
+ mov r3, #0x7c000000
+ orr r3, r3, r7
+ str r3, [r0], #4
+ add r3, r3, #1 << 20
+ str r3, [r0], #4
+1:
+#endif
+#endif
+#ifdef CONFIG_ARCH_RPC
+ /*
+ * Map in screen at 0x02000000 & SCREEN2_BASE
+ * Similar reasons here - for debug. This is
+ * only for Acorn RiscPC architectures.
+ */
+ add r0, r4, #0x80 @ 02000000
+ mov r3, #0x02000000
+ orr r3, r3, r7
+ str r3, [r0]
+ add r0, r4, #0x3600 @ d8000000
+ str r3, [r0]
+#endif
+ mov pc, lr
+ .ltorg
+
+
+
+/*
+ * Exception handling. Something went wrong and we can't proceed. We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if the bootloader fails to pass a
+ * proper machine ID, for example).
+ */
+
+ .type __error_p, %function
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_p1
+ bl printascii
+ b __error
+str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
+ .align
+#endif
+
+ .type __error_a, %function
+__error_a:
+#ifdef CONFIG_DEBUG_LL
+ mov r4, r1 @ preserve machine ID
+ adr r0, str_a1
+ bl printascii
+ mov r0, r4
+ bl printhex8
+ adr r0, str_a2
+ bl printascii
+ adr r3, 3f
+ ldmia r3, {r4, r5, r6} @ get machine desc list
+ sub r4, r3, r4 @ get offset between virt&phys
+ add r5, r5, r4 @ convert virt addresses to
+ add r6, r6, r4 @ physical address space
+1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
+ bl printhex8
+ mov r0, #'\t'
+ bl printch
+ ldr r0, [r5, #MACHINFO_NAME] @ get machine name
+ add r0, r0, r4
+ bl printascii
+ mov r0, #'\n'
+ bl printch
+ add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
+ cmp r5, r6
+ blo 1b
+ adr r0, str_a3
+ bl printascii
+ b __error
+str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
+str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
+str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
+ .align
+#endif
+
+ .type __error, %function
+__error:
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on an error - RiscPC only.
+ */
+ mov r0, #0x02000000
+ mov r3, #0x11
+ orr r3, r3, r3, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+#endif
+1: mov r0, r0
+ b 1b
+
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list. Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space). We have to
+ * calculate the offset.
+ *
+ * Returns:
+ * r3, r4, r6 corrupted
+ * r5 = proc_info pointer in physical address space
+ * r9 = cpuid
+ */
+ .type __lookup_processor_type, %function
+__lookup_processor_type:
+ adr r3, 3f
+ ldmda r3, {r5, r6, r9}
+ sub r3, r3, r9 @ get offset between virt&phys
+ add r5, r5, r3 @ convert virt addresses to
+ add r6, r6, r3 @ physical address space
+ mrc p15, 0, r9, c0, c0 @ get processor id
+1: ldmia r5, {r3, r4} @ value, mask
+ and r4, r4, r9 @ mask wanted bits
+ teq r3, r4
+ beq 2f
+ add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
+ cmp r5, r6
+ blo 1b
+ mov r5, #0 @ unknown processor
+2: mov pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_processor_type)
+ stmfd sp!, {r4 - r6, r9, lr}
+ bl __lookup_processor_type
+ mov r0, r5
+ ldmfd sp!, {r4 - r6, r9, pc}
+
+/*
+ * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * more information about the __proc_info and __arch_info structures.
+ */
+ .long __proc_info_begin
+ .long __proc_info_end
+3: .long .
+ .long __arch_info_begin
+ .long __arch_info_end
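
Both lookup routines depend on the "3: .long ." word above: it stores its own link-time (virtual) address, so reading it through the PC-relative (physical) pointer obtained with adr yields the delta used to relocate the list bounds into physical space. The arithmetic, sketched in C:

    /*
     * Sketch of the virt/phys fixup: *anchor is the anchor word's
     * virtual address, anchor itself is its run-time (physical)
     * address, so their difference relocates any other virtual
     * address in the image.
     */
    unsigned long virt_to_phys_sketch(unsigned long virt,
                                      const unsigned long *anchor)
    {
        long delta = (unsigned long)anchor - *anchor;   /* phys - virt */
        return virt + delta;
    }
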
+
+/*
+ * Look up the machine architecture in the linker-built list of architectures.
+ * Note that we can't use the absolute addresses for the __arch_info
+ * lists since we aren't running with the MMU on (and therefore, we are
+ * not in the correct address space). We have to calculate the offset.
+ *
+ * r1 = machine architecture number
+ * Returns:
+ * r3, r4, r6 corrupted
+ * r5 = mach_info pointer in physical address space
+ */
+ .type __lookup_machine_type, %function
+__lookup_machine_type:
+ adr r3, 3b
+ ldmia r3, {r4, r5, r6}
+ sub r3, r3, r4 @ get offset between virt&phys
+ add r5, r5, r3 @ convert virt addresses to
+ add r6, r6, r3 @ physical address space
+1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
+ teq r3, r1 @ matches loader number?
+ beq 2f @ found
+ add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
+ cmp r5, r6
+ blo 1b
+ mov r5, #0 @ unknown machine
+2: mov pc, lr
+
+/*
+ * This provides a C-API version of the above function.
+ */
+ENTRY(lookup_machine_type)
+ stmfd sp!, {r4 - r6, lr}
+ mov r1, r0
+ bl __lookup_machine_type
+ mov r0, r5
+ ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
new file mode 100644
index 00000000000..a00cca0000b
--- /dev/null
+++ b/arch/arm/kernel/init_task.c
@@ -0,0 +1,44 @@
+/*
+ * linux/arch/arm/kernel/init_task.c
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".init.task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
new file mode 100644
index 00000000000..6c20c1188b6
--- /dev/null
+++ b/arch/arm/kernel/io.c
@@ -0,0 +1,51 @@
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void _memcpy_fromio(void *to, void __iomem *from, size_t count)
+{
+ unsigned char *t = to;
+ while (count) {
+ count--;
+ *t = readb(from);
+ t++;
+ from++;
+ }
+}
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void _memcpy_toio(void __iomem *to, const void *from, size_t count)
+{
+ const unsigned char *f = from;
+ while (count) {
+ count--;
+ writeb(*f, to);
+ f++;
+ to++;
+ }
+}
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void _memset_io(void __iomem *dst, int c, size_t count)
+{
+ while (count) {
+ count--;
+ writeb(c, dst);
+ dst++;
+ }
+}
+
+EXPORT_SYMBOL(_memcpy_fromio);
+EXPORT_SYMBOL(_memcpy_toio);
+EXPORT_SYMBOL(_memset_io);
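
A hedged usage sketch for these helpers, going through the memcpy_fromio() wrapper that asm/io.h is assumed to map onto _memcpy_fromio(); the physical base and length are illustrative:

    #include <linux/errno.h>
    #include <asm/io.h>

    /* Illustrative caller: snapshot a device buffer from an
     * ioremap()ed window. DEV_PHYS_BASE/DEV_BUF_LEN are hypothetical. */
    static int snapshot_device_buffer(void *dst)
    {
        void __iomem *base = ioremap(DEV_PHYS_BASE, DEV_BUF_LEN);

        if (!base)
            return -ENOMEM;
        memcpy_fromio(dst, base, DEV_BUF_LEN);
        iounmap(base);
        return 0;
    }
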
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
new file mode 100644
index 00000000000..ff187f4308f
--- /dev/null
+++ b/arch/arm/kernel/irq.c
@@ -0,0 +1,1038 @@
+/*
+ * linux/arch/arm/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ *
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+#include <linux/config.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/proc_fs.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/mach/irq.h>
+
+/*
+ * Maximum IRQ count. Currently, this is arbitrary. However, it should
+ * not be set too low, or the lock detector will trigger falsely.
+ * Conversely, if it is set too high, then you could miss a stuck IRQ.
+ *
+ * Maybe we ought to set a timer and re-enable the IRQ at a later time?
+ */
+#define MAX_IRQ_CNT 100000
+
+static int noirqdebug;
+static volatile unsigned long irq_err_count;
+static DEFINE_SPINLOCK(irq_controller_lock);
+static LIST_HEAD(irq_pending);
+
+struct irqdesc irq_desc[NR_IRQS];
+void (*init_arch_irq)(void) __initdata = NULL;
+
+/*
+ * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
+ */
+#ifndef irq_finish
+#define irq_finish(irq) do { } while (0)
+#endif
+
+/*
+ * Dummy mask/unmask handler
+ */
+void dummy_mask_unmask_irq(unsigned int irq)
+{
+}
+
+irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_NONE;
+}
+
+void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ irq_err_count += 1;
+ printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+}
+
+static struct irqchip bad_chip = {
+ .ack = dummy_mask_unmask_irq,
+ .mask = dummy_mask_unmask_irq,
+ .unmask = dummy_mask_unmask_irq,
+};
+
+static struct irqdesc bad_irq_desc = {
+ .chip = &bad_chip,
+ .handle = do_bad_IRQ,
+ .pend = LIST_HEAD_INIT(bad_irq_desc.pend),
+ .disable_depth = 1,
+};
+
+#ifdef CONFIG_SMP
+void synchronize_irq(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+
+ while (desc->running)
+ barrier();
+}
+EXPORT_SYMBOL(synchronize_irq);
+
+#define smp_set_running(desc) do { desc->running = 1; } while (0)
+#define smp_clear_running(desc) do { desc->running = 0; } while (0)
+#else
+#define smp_set_running(desc) do { } while (0)
+#define smp_clear_running(desc) do { } while (0)
+#endif
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and disables
+ * are nested. We do this lazily.
+ *
+ * This function may be called from IRQ context.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ desc->disable_depth++;
+ list_del_init(&desc->pend);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(disable_irq_nosync);
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and disables
+ * are nested. This function waits for any pending IRQ
+ * handlers for this interrupt to complete before returning.
+ * If you use this function while holding a resource the IRQ
+ * handler may need, you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void disable_irq(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+
+ disable_irq_nosync(irq);
+ if (desc->action)
+ synchronize_irq(irq);
+}
+EXPORT_SYMBOL(disable_irq);
+
+/**
+ * enable_irq - enable interrupt handling on an irq
+ * @irq: Interrupt to enable
+ *
+ * Re-enables the processing of interrupts on this IRQ line.
+ * Note that this may call the interrupt handler, so you may
+ * get unexpected results if you hold IRQs disabled.
+ *
+ * This function may be called from IRQ context.
+ */
+void enable_irq(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (unlikely(!desc->disable_depth)) {
+ printk("enable_irq(%u) unbalanced from %p\n", irq,
+ __builtin_return_address(0));
+ } else if (!--desc->disable_depth) {
+ desc->probing = 0;
+ desc->chip->unmask(irq);
+
+ /*
+ * If the interrupt is waiting to be processed,
+ * try to re-run it. We can't directly run it
+ * from here since the caller might be in an
+ * interrupt-protected region.
+ */
+ if (desc->pending && list_empty(&desc->pend)) {
+ desc->pending = 0;
+ if (!desc->chip->retrigger ||
+ desc->chip->retrigger(irq))
+ list_add(&desc->pend, &irq_pending);
+ }
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
+
+/*
+ * Enable wake on selected irq
+ */
+void enable_irq_wake(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (desc->chip->wake)
+ desc->chip->wake(irq, 1);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(enable_irq_wake);
+
+void disable_irq_wake(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (desc->chip->wake)
+ desc->chip->wake(irq, 0);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+EXPORT_SYMBOL(disable_irq_wake);
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, cpu;
+ struct irqaction * action;
+ unsigned long flags;
+
+ if (i == 0) {
+ char cpuname[12];
+
+ seq_printf(p, " ");
+ for_each_present_cpu(cpu) {
+ sprintf(cpuname, "CPU%d", cpu);
+ seq_printf(p, " %10s", cpuname);
+ }
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto unlock;
+
+ seq_printf(p, "%3d: ", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+ seq_printf(p, " %s", action->name);
+ for (action = action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+unlock:
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+ } else if (i == NR_IRQS) {
+#ifdef CONFIG_ARCH_ACORN
+ show_fiq_list(p, v);
+#endif
+#ifdef CONFIG_SMP
+ show_ipi_list(p);
+#endif
+ seq_printf(p, "Err: %10lu\n", irq_err_count);
+ }
+ return 0;
+}
+
+/*
+ * IRQ lock detection.
+ *
+ * Hopefully, this should get us out of a few locked situations.
+ * However, it may take a while for this to happen, since we need
+ * a large number of IRQs to appear in the same jiffy with the
+ * same instruction pointer (or within 2 instructions).
+ */
+static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
+{
+ unsigned long instr_ptr = instruction_pointer(regs);
+
+ if (desc->lck_jif == jiffies &&
+ desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
+ desc->lck_cnt += 1;
+
+ if (desc->lck_cnt > MAX_IRQ_CNT) {
+ printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
+ return 1;
+ }
+ } else {
+ desc->lck_cnt = 0;
+ desc->lck_pc = instruction_pointer(regs);
+ desc->lck_jif = jiffies;
+ }
+ return 0;
+}
+
+static void
+report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
+{
+ static int count = 100;
+ struct irqaction *action;
+
+ if (!count || noirqdebug)
+ return;
+
+ count--;
+
+ if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
+ printk("irq%u: bogus retval mask %x\n", irq, ret);
+ } else {
+ printk("irq%u: nobody cared\n", irq);
+ }
+ show_regs(regs);
+ dump_stack();
+ printk(KERN_ERR "handlers:");
+ action = desc->action;
+ do {
+ printk("\n" KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)", (unsigned long)action->handler);
+ action = action->next;
+ } while (action);
+ printk("\n");
+}
+
+static int
+__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
+{
+ unsigned int status;
+ int ret, retval = 0;
+
+ spin_unlock(&irq_controller_lock);
+
+ if (!(action->flags & SA_INTERRUPT))
+ local_irq_enable();
+
+ status = 0;
+ do {
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ retval |= ret;
+ action = action->next;
+ } while (action);
+
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+
+ spin_lock_irq(&irq_controller_lock);
+
+ return retval;
+}
+
+/*
+ * This is for software-decoded IRQs. The caller is expected to
+ * handle the ack, clear, mask and unmask issues.
+ */
+void
+do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ struct irqaction *action;
+ const unsigned int cpu = smp_processor_id();
+
+ desc->triggered = 1;
+
+ kstat_cpu(cpu).irqs[irq]++;
+
+ smp_set_running(desc);
+
+ action = desc->action;
+ if (action) {
+ int ret = __do_irq(irq, action, regs);
+ if (ret != IRQ_HANDLED)
+ report_bad_irq(irq, regs, desc, ret);
+ }
+
+ smp_clear_running(desc);
+}
+
+/*
+ * Most edge-triggered IRQ implementations seem to take a broken
+ * approach to this. Hence the complexity.
+ */
+void
+do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ const unsigned int cpu = smp_processor_id();
+
+ desc->triggered = 1;
+
+ /*
+ * If we're currently running this IRQ, or it's disabled,
+ * we shouldn't process the IRQ. Instead, turn on the
+ * hardware masks.
+ */
+ if (unlikely(desc->running || desc->disable_depth))
+ goto running;
+
+ /*
+ * Acknowledge and clear the IRQ, but don't mask it.
+ */
+ desc->chip->ack(irq);
+
+ /*
+ * Mark the IRQ currently in progress.
+ */
+ desc->running = 1;
+
+ kstat_cpu(cpu).irqs[irq]++;
+
+ do {
+ struct irqaction *action;
+
+ action = desc->action;
+ if (!action)
+ break;
+
+ if (desc->pending && !desc->disable_depth) {
+ desc->pending = 0;
+ desc->chip->unmask(irq);
+ }
+
+ __do_irq(irq, action, regs);
+ } while (desc->pending && !desc->disable_depth);
+
+ desc->running = 0;
+
+ /*
+ * If we were disabled or freed, shut down the handler.
+ */
+ if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
+ return;
+
+ running:
+ /*
+ * We got another IRQ while this one was masked or
+ * currently running. Delay it.
+ */
+ desc->pending = 1;
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+}
+
+/*
+ * Level-based IRQ handler. Nice and simple.
+ */
+void
+do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ struct irqaction *action;
+ const unsigned int cpu = smp_processor_id();
+
+ desc->triggered = 1;
+
+ /*
+ * Acknowledge, clear _AND_ disable the interrupt.
+ */
+ desc->chip->ack(irq);
+
+ if (likely(!desc->disable_depth)) {
+ kstat_cpu(cpu).irqs[irq]++;
+
+ smp_set_running(desc);
+
+ /*
+ * Return with this interrupt masked if no action
+ */
+ action = desc->action;
+ if (action) {
+ int ret = __do_irq(irq, desc->action, regs);
+
+ if (ret != IRQ_HANDLED)
+ report_bad_irq(irq, regs, desc, ret);
+
+ if (likely(!desc->disable_depth &&
+ !check_irq_lock(desc, irq, regs)))
+ desc->chip->unmask(irq);
+ }
+
+ smp_clear_running(desc);
+ }
+}
+
+static void do_pending_irqs(struct pt_regs *regs)
+{
+ struct list_head head, *l, *n;
+
+ do {
+ struct irqdesc *desc;
+
+ /*
+ * First, take the pending interrupts off the list.
+ * The act of calling the handlers may add some IRQs
+ * back onto the list.
+ */
+ head = irq_pending;
+ INIT_LIST_HEAD(&irq_pending);
+ head.next->prev = &head;
+ head.prev->next = &head;
+
+ /*
+ * Now run each entry. We must delete it from our
+ * list before calling the handler.
+ */
+ list_for_each_safe(l, n, &head) {
+ desc = list_entry(l, struct irqdesc, pend);
+ list_del_init(&desc->pend);
+ desc->handle(desc - irq_desc, desc, regs);
+ }
+
+ /*
+ * The list must be empty.
+ */
+ BUG_ON(!list_empty(&head));
+ } while (!list_empty(&irq_pending));
+}
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'.
+ */
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct irqdesc *desc = irq_desc + irq;
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ desc = &bad_irq_desc;
+
+ irq_enter();
+ spin_lock(&irq_controller_lock);
+ desc->handle(irq, desc, regs);
+
+ /*
+ * Now re-run any pending interrupts.
+ */
+ if (!list_empty(&irq_pending))
+ do_pending_irqs(regs);
+
+ irq_finish(irq);
+
+ spin_unlock(&irq_controller_lock);
+ irq_exit();
+}
+
+void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
+{
+ struct irqdesc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
+ return;
+ }
+
+ if (handle == NULL)
+ handle = do_bad_IRQ;
+
+ desc = irq_desc + irq;
+
+ if (is_chained && desc->chip == &bad_chip)
+ printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (handle == do_bad_IRQ) {
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ desc->disable_depth = 1;
+ }
+ desc->handle = handle;
+ if (handle != do_bad_IRQ && is_chained) {
+ desc->valid = 0;
+ desc->probe_ok = 0;
+ desc->disable_depth = 0;
+ desc->chip->unmask(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+void set_irq_chip(unsigned int irq, struct irqchip *chip)
+{
+ struct irqdesc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
+ return;
+ }
+
+ if (chip == NULL)
+ chip = &bad_chip;
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ desc->chip = chip;
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+int set_irq_type(unsigned int irq, unsigned int type)
+{
+ struct irqdesc *desc;
+ unsigned long flags;
+ int ret = -ENXIO;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
+ return -ENODEV;
+ }
+
+ desc = irq_desc + irq;
+ if (desc->chip->type) {
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ ret = desc->chip->type(irq, type);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(set_irq_type);
+
+void set_irq_flags(unsigned int irq, unsigned int iflags)
+{
+ struct irqdesc *desc;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS) {
+ printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
+ return;
+ }
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ desc->valid = (iflags & IRQF_VALID) != 0;
+ desc->probe_ok = (iflags & IRQF_PROBE) != 0;
+ desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+int setup_irq(unsigned int irq, struct irqaction *new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+ struct irqdesc *desc;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, so we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is loaded without actually installing a new
+ * handler, but is that really a problem? Only the
+ * sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->probing = 0;
+ desc->running = 0;
+ desc->pending = 0;
+ desc->disable_depth = 1;
+ if (!desc->noautoenable) {
+ desc->disable_depth = 0;
+ desc->chip->unmask(irq);
+ }
+ }
+
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+ return 0;
+}
+
+/**
+ * request_irq - allocate an interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @irqflags: Interrupt type flags
+ * @devname: An ASCII name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. From the point this
+ * call is made your handler function may be invoked. Since
+ * your handler function must clear any interrupt the board
+ * raises, you must take care both to initialise your hardware
+ * and to set up the interrupt handler in the right order.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If your interrupt is shared you must pass a non-NULL dev_id
+ * as this is required when freeing the interrupt.
+ *
+ * Flags:
+ *
+ * SA_SHIRQ Interrupt is shared
+ *
+ * SA_INTERRUPT Disable local interrupts while processing
+ *
+ * SA_SAMPLE_RANDOM The interrupt can be used for entropy
+ *
+ */
+int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irq_flags, const char * devname, void *dev_id)
+{
+	int retval;
+ struct irqaction *action;
+
+ if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
+ (irq_flags & SA_SHIRQ && !dev_id))
+ return -EINVAL;
+
+	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irq_flags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
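
A hedged usage sketch of the contract documented above; struct my_dev, MY_IRQ and the functions are hypothetical, but the handler signature, SA_SHIRQ and the dev_id cookie follow this file.

/* Hedged sketch -- my_dev, MY_IRQ, my_isr and my_attach are hypothetical. */
struct my_dev;

static irqreturn_t my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_dev *dev = dev_id;	/* cookie passed to request_irq() */

	/* acknowledge and service the hardware here */
	return IRQ_HANDLED;
}

static int my_attach(struct my_dev *dev)
{
	/* shared line, so a non-NULL dev_id is mandatory */
	return request_irq(MY_IRQ, my_isr, SA_SHIRQ, "my_dev", dev);
}

static void my_detach(struct my_dev *dev)
{
	free_irq(MY_IRQ, dev);		/* must match the dev_id above */
}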
+
+/**
+ * free_irq - free an interrupt
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the
+ * interrupt line is no longer in use by any driver it is disabled.
+ * On a shared IRQ the caller must ensure the interrupt is disabled
+ * on the card it drives before calling this function.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction * action, **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !irq_desc[irq].valid) {
+		printk(KERN_ERR "Trying to free invalid IRQ%d\n", irq);
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now free it */
+ *p = action->next;
+ break;
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+
+ if (!action) {
+		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
+ dump_stack();
+ } else {
+ synchronize_irq(irq);
+ kfree(action);
+ }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+static DECLARE_MUTEX(probe_sem);
+
+/* Start the interrupt probing. Unlike other architectures,
+ * we don't return a mask of interrupts from probe_irq_on,
+ * but return the number of interrupts enabled for the probe.
+ * The interrupts which have been enabled for probing are
+ * instead recorded in the irq_desc structure.
+ */
+unsigned long probe_irq_on(void)
+{
+ unsigned int i, irqs = 0;
+ unsigned long delay;
+
+ down(&probe_sem);
+
+ /*
+ * first snaffle up any unassigned but
+ * probe-able interrupts
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = 0; i < NR_IRQS; i++) {
+ if (!irq_desc[i].probe_ok || irq_desc[i].action)
+ continue;
+
+ irq_desc[i].probing = 1;
+ irq_desc[i].triggered = 0;
+ if (irq_desc[i].chip->type)
+ irq_desc[i].chip->type(i, IRQT_PROBE);
+ irq_desc[i].chip->unmask(i);
+ irqs += 1;
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ /*
+ * wait for spurious interrupts to mask themselves out again
+ */
+ for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
+ /* min 100ms delay */;
+
+ /*
+ * now filter out any obviously spurious interrupts
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].probing && irq_desc[i].triggered) {
+ irq_desc[i].probing = 0;
+ irqs -= 1;
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ return irqs;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+unsigned int probe_irq_mask(unsigned long irqs)
+{
+ unsigned int mask = 0, i;
+
+ spin_lock_irq(&irq_controller_lock);
+ for (i = 0; i < 16 && i < NR_IRQS; i++)
+ if (irq_desc[i].probing && irq_desc[i].triggered)
+ mask |= 1 << i;
+ spin_unlock_irq(&irq_controller_lock);
+
+ up(&probe_sem);
+
+ return mask;
+}
+EXPORT_SYMBOL(probe_irq_mask);
+
+/*
+ * Possible return values:
+ * >= 0 - interrupt number
+ * -1 - no interrupt/many interrupts
+ */
+int probe_irq_off(unsigned long irqs)
+{
+ unsigned int i;
+ int irq_found = NO_IRQ;
+
+ /*
+	 * look at the interrupts, and find exactly one
+	 * of those we were probing that has been triggered
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].probing &&
+ irq_desc[i].triggered) {
+ if (irq_found != NO_IRQ) {
+ irq_found = NO_IRQ;
+ goto out;
+ }
+ irq_found = i;
+ }
+ }
+
+ if (irq_found == -1)
+ irq_found = NO_IRQ;
+out:
+ spin_unlock_irq(&irq_controller_lock);
+
+ up(&probe_sem);
+
+ return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
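
Taken together, these routines support the classic autoprobe dance; a hedged sketch follows (my_device_pulse_irq() is a hypothetical helper that makes the hardware raise its interrupt).

/* Hedged sketch of an autoprobe sequence using the routines above. */
static int __init my_find_irq(void)
{
	unsigned long probe = probe_irq_on();	/* unmask probe-able lines */

	my_device_pulse_irq();			/* hypothetical: fire the IRQ */
	mdelay(20);

	return probe_irq_off(probe);		/* NO_IRQ if none/several fired */
}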
+
+#ifdef CONFIG_SMP
+static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
+{
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+
+ spin_lock_irq(&irq_controller_lock);
+ desc->cpu = cpu;
+ desc->chip->set_cpu(desc, irq, cpu);
+ spin_unlock_irq(&irq_controller_lock);
+}
+
+#ifdef CONFIG_PROC_FS
+static int
+irq_affinity_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct irqdesc *desc = irq_desc + ((int)data);
+ int len = cpumask_scnprintf(page, count, desc->affinity);
+
+ if (count - len < 2)
+ return -EINVAL;
+ page[len++] = '\n';
+ page[len] = '\0';
+
+ return len;
+}
+
+static int
+irq_affinity_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned int irq = (unsigned int)data;
+ struct irqdesc *desc = irq_desc + irq;
+ cpumask_t affinity, tmp;
+ int ret = -EIO;
+
+ if (!desc->chip->set_cpu)
+ goto out;
+
+ ret = cpumask_parse(buffer, count, affinity);
+ if (ret)
+ goto out;
+
+ cpus_and(tmp, affinity, cpu_online_map);
+ if (cpus_empty(tmp)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ desc->affinity = affinity;
+ route_irq(desc, irq, first_cpu(tmp));
+ ret = count;
+
+ out:
+ return ret;
+}
+#endif
+#endif
+
+void __init init_irq_proc(void)
+{
+#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
+ struct proc_dir_entry *dir;
+ int irq;
+
+ dir = proc_mkdir("irq", 0);
+ if (!dir)
+ return;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct proc_dir_entry *entry;
+ struct irqdesc *desc;
+ char name[16];
+
+ desc = irq_desc + irq;
+ memset(name, 0, sizeof(name));
+ snprintf(name, sizeof(name) - 1, "%u", irq);
+
+ desc->procdir = proc_mkdir(name, dir);
+ if (!desc->procdir)
+ continue;
+
+ entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+ }
+#endif
+}
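
From userspace the smp_affinity files created above are driven by writing a CPU bitmask; a hedged sketch (IRQ 29 is only an example number).

/* Hedged userspace sketch: steer IRQ 29 to CPU 1 (mask bit 1 => "2"). */
#include <fcntl.h>
#include <unistd.h>

int steer_irq29_to_cpu1(void)
{
	int fd = open("/proc/irq/29/smp_affinity", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "2\n", 2) != 2) {
		close(fd);
		return -1;
	}
	return close(fd);
}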
+
+void __init init_IRQ(void)
+{
+ struct irqdesc *desc;
+ extern void init_dma(void);
+ int irq;
+
+#ifdef CONFIG_SMP
+ bad_irq_desc.affinity = CPU_MASK_ALL;
+ bad_irq_desc.cpu = smp_processor_id();
+#endif
+
+ for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
+ *desc = bad_irq_desc;
+ INIT_LIST_HEAD(&desc->pend);
+ }
+
+ init_arch_irq();
+ init_dma();
+}
+
+static int __init noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
new file mode 100644
index 00000000000..685c3e591a7
--- /dev/null
+++ b/arch/arm/kernel/isa.c
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/kernel/isa.c
+ *
+ * Copyright (C) 1999 Phil Blundell
+ *
+ * ISA shared memory and I/O port support
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Nothing about this is actually ARM specific. One day we could move
+ * it into kernel/resource.c or some place like that.
+ */
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+static unsigned int isa_membase, isa_portbase, isa_portshift;
+
+static ctl_table ctl_isa_vars[4] = {
+ {BUS_ISA_MEM_BASE, "membase", &isa_membase,
+ sizeof(isa_membase), 0444, NULL, &proc_dointvec},
+ {BUS_ISA_PORT_BASE, "portbase", &isa_portbase,
+ sizeof(isa_portbase), 0444, NULL, &proc_dointvec},
+ {BUS_ISA_PORT_SHIFT, "portshift", &isa_portshift,
+ sizeof(isa_portshift), 0444, NULL, &proc_dointvec},
+ {0}
+};
+
+static struct ctl_table_header *isa_sysctl_header;
+
+static ctl_table ctl_isa[2] = {{CTL_BUS_ISA, "isa", NULL, 0, 0555, ctl_isa_vars},
+ {0}};
+static ctl_table ctl_bus[2] = {{CTL_BUS, "bus", NULL, 0, 0555, ctl_isa},
+ {0}};
+
+void __init
+register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
+{
+ isa_membase = membase;
+ isa_portbase = portbase;
+ isa_portshift = portshift;
+ isa_sysctl_header = register_sysctl_table(ctl_bus, 0);
+}
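
A machine with ISA hardware would make a single call like the hedged sketch below early in its init; the addresses are illustrative placeholders, not any real platform's mapping.

/* Hedged sketch -- the addresses are illustrative only. */
static void __init my_mach_init(void)
{
	/* ISA memory window, I/O port window, no port shift */
	register_isa_ports(0xe0000000, 0xe4000000, 0);
}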
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
new file mode 100644
index 00000000000..8f74e24536b
--- /dev/null
+++ b/arch/arm/kernel/iwmmxt.S
@@ -0,0 +1,320 @@
+/*
+ * linux/arch/arm/kernel/iwmmxt.S
+ *
+ * XScale iWMMXt (Concan) context switching and handling
+ *
+ * Initial code:
+ * Copyright (c) 2003, Intel Corporation
+ *
+ * Full lazy switching support, optimizations and more, by Nicolas Pitre
+ * Copyright (c) 2003-2004, MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/constants.h>
+
+#define MMX_WR0 (0x00)
+#define MMX_WR1 (0x08)
+#define MMX_WR2 (0x10)
+#define MMX_WR3 (0x18)
+#define MMX_WR4 (0x20)
+#define MMX_WR5 (0x28)
+#define MMX_WR6 (0x30)
+#define MMX_WR7 (0x38)
+#define MMX_WR8 (0x40)
+#define MMX_WR9 (0x48)
+#define MMX_WR10 (0x50)
+#define MMX_WR11 (0x58)
+#define MMX_WR12 (0x60)
+#define MMX_WR13 (0x68)
+#define MMX_WR14 (0x70)
+#define MMX_WR15 (0x78)
+#define MMX_WCSSF (0x80)
+#define MMX_WCASF (0x84)
+#define MMX_WCGR0 (0x88)
+#define MMX_WCGR1 (0x8C)
+#define MMX_WCGR2 (0x90)
+#define MMX_WCGR3 (0x94)
+
+#define MMX_SIZE (0x98)
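
The offsets above describe a 0x98-byte per-task save area; in C terms it corresponds to a layout like this hedged sketch (the kernel does not declare such a struct here).

/* Hedged C view of the save area addressed by the MMX_* offsets. */
struct iwmmxt_state_sketch {
	unsigned long long wr[16];	/* wR0..wR15 at 0x00..0x78 */
	unsigned int wcssf;		/* 0x80 */
	unsigned int wcasf;		/* 0x84 */
	unsigned int wcgr[4];		/* wCGR0..wCGR3 at 0x88..0x94 */
};					/* sizeof == 0x98 == MMX_SIZE */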
+
+ .text
+
+/*
+ * Lazy switching of Concan coprocessor context
+ *
+ * r10 = struct thread_info pointer
+ * r9 = ret_from_exception
+ * lr = undefined instr exit
+ *
+ * called from prefetch exception handler with interrupts disabled
+ */
+
+ENTRY(iwmmxt_task_enable)
+
+ mrc p15, 0, r2, c15, c1, 0
+ tst r2, #0x3 @ CP0 and CP1 accessible?
+ movne pc, lr @ if so no business here
+ orr r2, r2, #0x3 @ enable access to CP0 and CP1
+ mcr p15, 0, r2, c15, c1, 0
+
+ ldr r3, =concan_owner
+ add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
+ ldr r2, [sp, #60] @ current task pc value
+ ldr r1, [r3] @ get current Concan owner
+ str r0, [r3] @ this task now owns Concan regs
+ sub r2, r2, #4 @ adjust pc back
+ str r2, [sp, #60]
+
+ mrc p15, 0, r2, c2, c0, 0
+ mov r2, r2 @ cpwait
+
+ teq r1, #0 @ test for last ownership
+ mov lr, r9 @ normal exit from exception
+ beq concan_load @ no owner, skip save
+
+concan_save:
+
+ tmrc r2, wCon
+
+ @ CUP? wCx
+ tst r2, #0x1
+ beq 1f
+
+concan_dump:
+
+ wstrw wCSSF, [r1, #MMX_WCSSF]
+ wstrw wCASF, [r1, #MMX_WCASF]
+ wstrw wCGR0, [r1, #MMX_WCGR0]
+ wstrw wCGR1, [r1, #MMX_WCGR1]
+ wstrw wCGR2, [r1, #MMX_WCGR2]
+ wstrw wCGR3, [r1, #MMX_WCGR3]
+
+1: @ MUP? wRn
+ tst r2, #0x2
+ beq 2f
+
+ wstrd wR0, [r1, #MMX_WR0]
+ wstrd wR1, [r1, #MMX_WR1]
+ wstrd wR2, [r1, #MMX_WR2]
+ wstrd wR3, [r1, #MMX_WR3]
+ wstrd wR4, [r1, #MMX_WR4]
+ wstrd wR5, [r1, #MMX_WR5]
+ wstrd wR6, [r1, #MMX_WR6]
+ wstrd wR7, [r1, #MMX_WR7]
+ wstrd wR8, [r1, #MMX_WR8]
+ wstrd wR9, [r1, #MMX_WR9]
+ wstrd wR10, [r1, #MMX_WR10]
+ wstrd wR11, [r1, #MMX_WR11]
+ wstrd wR12, [r1, #MMX_WR12]
+ wstrd wR13, [r1, #MMX_WR13]
+ wstrd wR14, [r1, #MMX_WR14]
+ wstrd wR15, [r1, #MMX_WR15]
+
+2: teq r0, #0 @ anything to load?
+ moveq pc, lr
+
+concan_load:
+
+ @ Load wRn
+ wldrd wR0, [r0, #MMX_WR0]
+ wldrd wR1, [r0, #MMX_WR1]
+ wldrd wR2, [r0, #MMX_WR2]
+ wldrd wR3, [r0, #MMX_WR3]
+ wldrd wR4, [r0, #MMX_WR4]
+ wldrd wR5, [r0, #MMX_WR5]
+ wldrd wR6, [r0, #MMX_WR6]
+ wldrd wR7, [r0, #MMX_WR7]
+ wldrd wR8, [r0, #MMX_WR8]
+ wldrd wR9, [r0, #MMX_WR9]
+ wldrd wR10, [r0, #MMX_WR10]
+ wldrd wR11, [r0, #MMX_WR11]
+ wldrd wR12, [r0, #MMX_WR12]
+ wldrd wR13, [r0, #MMX_WR13]
+ wldrd wR14, [r0, #MMX_WR14]
+ wldrd wR15, [r0, #MMX_WR15]
+
+ @ Load wCx
+ wldrw wCSSF, [r0, #MMX_WCSSF]
+ wldrw wCASF, [r0, #MMX_WCASF]
+ wldrw wCGR0, [r0, #MMX_WCGR0]
+ wldrw wCGR1, [r0, #MMX_WCGR1]
+ wldrw wCGR2, [r0, #MMX_WCGR2]
+ wldrw wCGR3, [r0, #MMX_WCGR3]
+
+ @ clear CUP/MUP (only if r1 != 0)
+ teq r1, #0
+ mov r2, #0
+ moveq pc, lr
+ tmcr wCon, r2
+ mov pc, lr
+
+/*
+ * Back up Concan regs to save area and disable access to them
+ * (mainly for gdb or sleep mode usage)
+ *
+ * r0 = struct thread_info pointer of target task or NULL for any
+ */
+
+ENTRY(iwmmxt_task_disable)
+
+ stmfd sp!, {r4, lr}
+
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =concan_owner
+ add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
+ ldr r1, [r3] @ get current Concan owner
+ teq r1, #0 @ any current owner?
+ beq 1f @ no: quit
+ teq r0, #0 @ any owner?
+ teqne r1, r2 @ or specified one?
+ bne 1f @ no: quit
+
+ mrc p15, 0, r4, c15, c1, 0
+ orr r4, r4, #0x3 @ enable access to CP0 and CP1
+ mcr p15, 0, r4, c15, c1, 0
+ mov r0, #0 @ nothing to load
+ str r0, [r3] @ no more current owner
+ mrc p15, 0, r2, c2, c0, 0
+ mov r2, r2 @ cpwait
+ bl concan_save
+
+ bic r4, r4, #0x3 @ disable access to CP0 and CP1
+ mcr p15, 0, r4, c15, c1, 0
+ mrc p15, 0, r2, c2, c0, 0
+ mov r2, r2 @ cpwait
+
+1: msr cpsr_c, ip @ restore interrupt mode
+ ldmfd sp!, {r4, pc}
+
+/*
+ * Copy Concan state to given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to store Concan state
+ *
+ * this is called mainly in the creation of signal stack frames
+ */
+
+ENTRY(iwmmxt_task_copy)
+
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =concan_owner
+ add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
+ ldr r3, [r3] @ get current Concan owner
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ current Concan values are in the task save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r1
+ mov r1, r2
+ mov r2, #MMX_SIZE
+ b memcpy
+
+1: @ this task owns Concan regs -- grab a copy from there
+ mov r0, #0 @ nothing to load
+ mov r2, #3 @ save all regs
+ mov r3, lr @ preserve return address
+ bl concan_dump
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
+
+/*
+ * Restore Concan state from given memory address
+ *
+ * r0 = struct thread_info pointer of target task
+ * r1 = memory address where to get Concan state from
+ *
+ * this is used to restore Concan state when unwinding a signal stack frame
+ */
+
+ENTRY(iwmmxt_task_restore)
+
+ mrs ip, cpsr
+ orr r2, ip, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, r2
+
+ ldr r3, =concan_owner
+ add r2, r0, #TI_IWMMXT_STATE @ get task Concan save area
+ ldr r3, [r3] @ get current Concan owner
+ bic r2, r2, #0x7 @ 64-bit alignment
+ teq r2, r3 @ does this task own it...
+ beq 1f
+
+ @ this task doesn't own Concan regs -- use its save area
+ msr cpsr_c, ip @ restore interrupt mode
+ mov r0, r2
+ mov r2, #MMX_SIZE
+ b memcpy
+
+1: @ this task owns Concan regs -- load them directly
+ mov r0, r1
+ mov r1, #0 @ don't clear CUP/MUP
+ mov r3, lr @ preserve return address
+ bl concan_load
+ msr cpsr_c, ip @ restore interrupt mode
+ mov pc, r3
+
+/*
+ * Concan handling on task switch
+ *
+ * r0 = previous task_struct pointer (must be preserved)
+ * r1 = previous thread_info pointer
+ * r2 = next thread_info.cpu_domain pointer (must be preserved)
+ *
+ * Called only from __switch_to with task preemption disabled.
+ * No need to care about preserving r4 and above.
+ */
+ENTRY(iwmmxt_task_switch)
+
+ mrc p15, 0, r4, c15, c1, 0
+ tst r4, #0x3 @ CP0 and CP1 accessible?
+ bne 1f @ yes: block them for next task
+
+ ldr r5, =concan_owner
+ add r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
+ ldr r5, [r5] @ get current Concan owner
+ teq r5, r6 @ next task owns it?
+ movne pc, lr @ no: leave Concan disabled
+
+1: eor r4, r4, #3 @ flip Concan access
+ mcr p15, 0, r4, c15, c1, 0
+
+ mrc p15, 0, r4, c2, c0, 0
+ sub pc, lr, r4, lsr #32 @ cpwait and return
+
+/*
+ * Remove Concan ownership of given task
+ *
+ * r0 = struct thread_info pointer
+ */
+ENTRY(iwmmxt_task_release)
+
+ mrs r2, cpsr
+ orr ip, r2, #PSR_I_BIT @ disable interrupts
+ msr cpsr_c, ip
+ ldr r3, =concan_owner
+ add r0, r0, #TI_IWMMXT_STATE @ get task Concan save area
+ ldr r1, [r3] @ get current Concan owner
+ eors r0, r0, r1 @ if equal...
+ streq r0, [r3] @ then clear ownership
+ msr cpsr_c, r2 @ restore interrupts
+ mov pc, lr
+
+ .data
+concan_owner:
+ .word 0
+
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
new file mode 100644
index 00000000000..1a85cfdad5a
--- /dev/null
+++ b/arch/arm/kernel/module.c
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/arm/kernel/module.c
+ *
+ * Copyright (C) 2002 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Module allocation method suggested by Andi Kleen.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+/*
+ * The XIP kernel text is mapped in the module area for modules and
+ * some other stuff to work without any indirect relocations.
+ * MODULE_START is redefined here and not in asm/memory.h to avoid
+ * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ */
+extern void _etext;
+#undef MODULE_START
+#define MODULE_START (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size)
+ return NULL;
+
+ area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+ if (!area)
+ return NULL;
+
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+}
+
+void module_free(struct module *module, void *region)
+{
+ vfree(region);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int
+apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+ unsigned int relindex, struct module *module)
+{
+ Elf32_Shdr *symsec = sechdrs + symindex;
+ Elf32_Shdr *relsec = sechdrs + relindex;
+ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+ Elf32_Rel *rel = (void *)relsec->sh_addr;
+ unsigned int i;
+
+ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
+ unsigned long loc;
+ Elf32_Sym *sym;
+ s32 offset;
+
+ offset = ELF32_R_SYM(rel->r_info);
+		if (offset < 0 || offset >= (symsec->sh_size / sizeof(Elf32_Sym))) {
+ printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+ module->name, relindex, i);
+ return -ENOEXEC;
+ }
+
+ sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+
+		if (rel->r_offset > dstsec->sh_size - sizeof(u32)) {
+			printk(KERN_ERR "%s: out of bounds relocation, "
+				"section %d reloc %d offset %u size %u\n",
+				module->name, relindex, i, rel->r_offset,
+				dstsec->sh_size);
+ return -ENOEXEC;
+ }
+
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_ARM_ABS32:
+ *(u32 *)loc += sym->st_value;
+ break;
+
+ case R_ARM_PC24:
+ offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ if (offset & 0x02000000)
+ offset -= 0x04000000;
+
+ offset += sym->st_value - loc;
+			if (offset & 3 ||
+			    offset <= (s32)0xfe000000 ||
+			    offset >= (s32)0x02000000) {
+ printk(KERN_ERR
+ "%s: relocation out of range, section "
+ "%d reloc %d sym '%s'\n", module->name,
+ relindex, i, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+
+ *(u32 *)loc &= 0xff000000;
+ *(u32 *)loc |= offset & 0x00ffffff;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
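
The R_ARM_PC24 case packs a signed word offset into the low 24 bits of a branch instruction; here is a hedged standalone sketch of the same fixup arithmetic (it ignores the pre-existing addend that the loop above decodes first).

/* Hedged sketch: patch the ARM branch at 'loc' to reach 'target';
 * the PC reads as loc + 8 when the branch executes. */
static int patch_arm_branch(u32 *loc, unsigned long target)
{
	s32 offset = target - (unsigned long)loc - 8;

	if (offset & 3 ||
	    offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)
		return -1;	/* misaligned or beyond +/-32MB */

	*loc = (*loc & 0xff000000) | ((offset >> 2) & 0x00ffffff);
	return 0;
}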
+
+int
+apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec, struct module *module)
+{
+ printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
+ module->name);
+ return -ENOEXEC;
+}
+
+int
+module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *module)
+{
+ return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
new file mode 100644
index 00000000000..dbd8ca89b38
--- /dev/null
+++ b/arch/arm/kernel/process.c
@@ -0,0 +1,460 @@
+/*
+ * linux/arch/arm/kernel/process.c
+ *
+ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ * Original Copyright (C) 1995 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdarg.h>
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/leds.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+extern const char *processor_modes[];
+extern void setup_mm_for_reboot(char mode);
+
+static volatile int hlt_counter;
+
+#include <asm/arch/system.h>
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
+static int __init nohlt_setup(char *__unused)
+{
+ hlt_counter = 1;
+ return 1;
+}
+
+static int __init hlt_setup(char *__unused)
+{
+ hlt_counter = 0;
+ return 1;
+}
+
+__setup("nohlt", nohlt_setup);
+__setup("hlt", hlt_setup);
+
+/*
+ * Hooks that machine-specific code may override: pm_idle is consulted
+ * by cpu_idle() and pm_power_off by machine_power_off() below.
+ */
+void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+/*
+ * This is our default idle handler. We need to disable
+ * interrupts here to ensure we don't miss a wakeup call.
+ */
+void default_idle(void)
+{
+ local_irq_disable();
+ if (!need_resched() && !hlt_counter)
+ arch_idle();
+ local_irq_enable();
+}
+
+/*
+ * The idle thread. We try to conserve power, while trying to keep
+ * overall latency low.
+ */
+void cpu_idle(void)
+{
+ local_fiq_enable();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ void (*idle)(void) = pm_idle;
+ if (!idle)
+ idle = default_idle;
+ preempt_disable();
+ leds_event(led_idle_start);
+ while (!need_resched())
+ idle();
+ leds_event(led_idle_end);
+ preempt_enable();
+ schedule();
+ }
+}
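
pm_idle is the hook a machine may install to replace default_idle(); a hedged sketch of such an override follows (all names hypothetical).

/* Hedged sketch -- my_idle and its initcall are hypothetical. */
static void my_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		arch_idle();		/* the machine's low-power wait */
	local_irq_enable();
}

static int __init my_idle_init(void)
{
	pm_idle = my_idle;		/* picked up by cpu_idle() above */
	return 0;
}
arch_initcall(my_idle_init);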
+
+static char reboot_mode = 'h';
+
+int __init reboot_setup(char *str)
+{
+ reboot_mode = str[0];
+ return 1;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_halt(void)
+{
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void machine_restart(char * __unused)
+{
+ /*
+ * Clean and disable cache, and turn off interrupts
+ */
+ cpu_proc_fin();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(reboot_mode);
+
+ /*
+ * Now call the architecture specific reboot code.
+ */
+ arch_reset(reboot_mode);
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+	printk(KERN_EMERG "Reboot failed -- System halted\n");
+ while (1);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void show_regs(struct pt_regs * regs)
+{
+ unsigned long flags;
+
+ flags = condition_codes(regs);
+
+ print_symbol("PC is at %s\n", instruction_pointer(regs));
+ print_symbol("LR is at %s\n", regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ instruction_pointer(regs),
+ regs->ARM_lr, print_tainted(), regs->ARM_sp,
+ regs->ARM_ip, regs->ARM_fp);
+ printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->ARM_r10, regs->ARM_r9,
+ regs->ARM_r8);
+ printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->ARM_r7, regs->ARM_r6,
+ regs->ARM_r5, regs->ARM_r4);
+ printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->ARM_r3, regs->ARM_r2,
+ regs->ARM_r1, regs->ARM_r0);
+ printk("Flags: %c%c%c%c",
+ flags & PSR_N_BIT ? 'N' : 'n',
+ flags & PSR_Z_BIT ? 'Z' : 'z',
+ flags & PSR_C_BIT ? 'C' : 'c',
+ flags & PSR_V_BIT ? 'V' : 'v');
+ printk(" IRQs o%s FIQs o%s Mode %s%s Segment %s\n",
+ interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ thumb_mode(regs) ? " (T)" : "",
+ get_fs() == get_ds() ? "kernel" : "user");
+ {
+ unsigned int ctrl, transbase, dac;
+ __asm__ (
+ " mrc p15, 0, %0, c1, c0\n"
+ " mrc p15, 0, %1, c2, c0\n"
+ " mrc p15, 0, %2, c3, c0\n"
+ : "=r" (ctrl), "=r" (transbase), "=r" (dac));
+ printk("Control: %04X Table: %08X DAC: %08X\n",
+ ctrl, transbase, dac);
+ }
+}
+
+void show_fpregs(struct user_fp *regs)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ unsigned long *p;
+ char type;
+
+ p = (unsigned long *)(regs->fpregs + i);
+
+ switch (regs->ftype[i]) {
+ case 1: type = 'f'; break;
+ case 2: type = 'd'; break;
+ case 3: type = 'e'; break;
+ default: type = '?'; break;
+ }
+ if (regs->init_flag)
+ type = '?';
+
+ printk(" f%d(%c): %08lx %08lx %08lx%c",
+ i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
+ }
+
+ printk("FPSR: %08lx FPCR: %08lx\n",
+ (unsigned long)regs->fpsr,
+ (unsigned long)regs->fpcr);
+}
+
+/*
+ * Task structure and kernel stack allocation.
+ */
+static unsigned long *thread_info_head;
+static unsigned int nr_thread_info;
+
+#define EXTRA_TASK_STRUCT 4
+#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
+
+struct thread_info *alloc_thread_info(struct task_struct *task)
+{
+ struct thread_info *thread = NULL;
+
+ if (EXTRA_TASK_STRUCT) {
+ unsigned long *p = thread_info_head;
+
+ if (p) {
+ thread_info_head = (unsigned long *)p[0];
+ nr_thread_info -= 1;
+ }
+ thread = (struct thread_info *)p;
+ }
+
+ if (!thread)
+ thread = ll_alloc_task_struct();
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ /*
+ * The stack must be cleared if you want SYSRQ-T to
+ * give sensible stack usage information
+ */
+ if (thread) {
+ char *p = (char *)thread;
+ memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
+ }
+#endif
+ return thread;
+}
+
+void free_thread_info(struct thread_info *thread)
+{
+ if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
+ unsigned long *p = (unsigned long *)thread;
+ p[0] = (unsigned long)thread_info_head;
+ thread_info_head = p;
+ nr_thread_info += 1;
+ } else
+ ll_free_task_struct(thread);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+static void default_fp_init(union fp_state *fp)
+{
+ memset(fp, 0, sizeof(union fp_state));
+}
+
+void (*fp_init)(union fp_state *) = default_fp_init;
+EXPORT_SYMBOL(fp_init);
+
+void flush_thread(void)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+
+ memset(thread->used_cp, 0, sizeof(thread->used_cp));
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+#if defined(CONFIG_IWMMXT)
+ iwmmxt_task_release(thread);
+#endif
+ fp_init(&thread->fpstate);
+#if defined(CONFIG_VFP)
+ vfp_flush_thread(&thread->vfpstate);
+#endif
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+#if defined(CONFIG_VFP)
+ vfp_release_thread(&dead_task->thread_info->vfpstate);
+#endif
+#if defined(CONFIG_IWMMXT)
+ iwmmxt_task_release(dead_task->thread_info);
+#endif
+}
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
+{
+ struct thread_info *thread = p->thread_info;
+ struct pt_regs *childregs;
+
+ childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
+ *childregs = *regs;
+ childregs->ARM_r0 = 0;
+ childregs->ARM_sp = stack_start;
+
+ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ thread->cpu_context.sp = (unsigned long)childregs;
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
+
+ if (clone_flags & CLONE_SETTLS)
+ thread->tp_value = regs->ARM_r3;
+
+ return 0;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
+{
+ struct thread_info *thread = current_thread_info();
+ int used_math = thread->used_cp[1] | thread->used_cp[2];
+
+ if (used_math)
+ memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
+
+ return used_math != 0;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ struct task_struct *tsk = current;
+
+ dump->magic = CMAGIC;
+ dump->start_code = tsk->mm->start_code;
+ dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+
+ dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
+ dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ dump->u_ssize = 0;
+
+ dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
+ dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
+ dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
+ dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
+ dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+
+ if (dump->start_stack < 0x04000000)
+ dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+
+ dump->regs = *regs;
+ dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+}
+EXPORT_SYMBOL(dump_thread);
+
+/*
+ * Shuffle the argument into the correct register before calling the
+ * thread function. r1 is the thread argument, r2 is the pointer to
+ * the thread function, and r3 points to the exit function.
+ */
+extern void kernel_thread_helper(void);
+asm( ".section .text\n"
+" .align\n"
+" .type kernel_thread_helper, #function\n"
+"kernel_thread_helper:\n"
+" mov r0, r1\n"
+" mov lr, r3\n"
+" mov pc, r2\n"
+" .size kernel_thread_helper, . - kernel_thread_helper\n"
+" .previous");
+
+/*
+ * Create a kernel thread.
+ */
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.ARM_r1 = (unsigned long)arg;
+ regs.ARM_r2 = (unsigned long)fn;
+ regs.ARM_r3 = (unsigned long)do_exit;
+ regs.ARM_pc = (unsigned long)kernel_thread_helper;
+ regs.ARM_cpsr = SVC_MODE;
+
+ return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
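
A hedged sketch of a caller: the thread function runs in the new context, and its return value flows into do_exit() via the r3 trampoline above.

/* Hedged sketch -- my_kthread_fn and my_start_thread are hypothetical. */
static int my_kthread_fn(void *data)
{
	/* do the work; returning exits the thread via do_exit() */
	return 0;
}

static void my_start_thread(void)
{
	pid_t pid = kernel_thread(my_kthread_fn, NULL, CLONE_KERNEL);

	if (pid < 0)
		printk(KERN_ERR "kernel_thread failed: %d\n", pid);
}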
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, lr;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = 4096 + (unsigned long)p->thread_info;
+ fp = thread_saved_fp(p);
+	do {
+		if (fp < stack_page || fp > 4092+stack_page)
+			return 0;
+		lr = pc_pointer(((unsigned long *)fp)[-1]);
+		if (!in_sched_functions(lr))
+			return lr;
+		fp = *(unsigned long *)(fp - 12);
+	} while (count++ < 16);
+ return 0;
+}
+EXPORT_SYMBOL(get_wchan);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
new file mode 100644
index 00000000000..efd7a341614
--- /dev/null
+++ b/arch/arm/kernel/ptrace.c
@@ -0,0 +1,861 @@
+/*
+ * linux/arch/arm/kernel/ptrace.c
+ *
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ * ARM modifications Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+#include "ptrace.h"
+
+#define REG_PC 15
+#define REG_PSR 16
+/*
+ * does not yet catch signals sent when the child dies.
+ * in exit.c or in signal.c.
+ */
+
+#if 0
+/*
+ * Breakpoint SWI instruction: SWI &9F0001
+ */
+#define BREAKINST_ARM 0xef9f0001
+#define BREAKINST_THUMB 0xdf00 /* fill this in later */
+#else
+/*
+ * New breakpoints - use an undefined instruction. The ARM architecture
+ * reference manual guarantees that the following instruction space
+ * will produce an undefined instruction exception on all CPUs:
+ *
+ * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
+ * Thumb: 1101 1110 xxxx xxxx
+ */
+#define BREAKINST_ARM 0xe7f001f0
+#define BREAKINST_THUMB 0xde01
+#endif
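
A hedged helper showing how a fetched word can be matched against that guaranteed-undefined ARM pattern (this file does not define such a function).

/* Hedged sketch: fixed bits 27-20 = 0111 1111 and bits 7-4 = 1111;
 * BREAKINST_ARM (0xe7f001f0) satisfies the test. */
static inline int is_arm_bkpt_space(u32 insn)
{
	return (insn & 0x0ff000f0) == 0x07f000f0;
}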
+
+/*
+ * Get the address of the live pt_regs for the specified task.
+ * These are saved onto the top kernel stack when the process
+ * is not running.
+ *
+ * Note: if a user thread is execve'd from kernel space, the
+ * kernel stack will not be empty on entry to the kernel, so
+ * ptracing these tasks will fail.
+ */
+static inline struct pt_regs *
+get_user_regs(struct task_struct *task)
+{
+ return (struct pt_regs *)
+ ((unsigned long)task->thread_info + THREAD_SIZE -
+ 8 - sizeof(struct pt_regs));
+}
+
+/*
+ * this routine will get a word off of the processes privileged stack.
+ * this routine will get a word off the process's privileged stack.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long get_user_reg(struct task_struct *task, int offset)
+{
+ return get_user_regs(task)->uregs[offset];
+}
+
+/*
+ * this routine will put a word on the process's privileged stack.
+ * the offset is how far from the base addr as stored in the THREAD.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int
+put_user_reg(struct task_struct *task, int offset, long data)
+{
+ struct pt_regs newregs, *regs = get_user_regs(task);
+ int ret = -EINVAL;
+
+ newregs = *regs;
+ newregs.uregs[offset] = data;
+
+ if (valid_user_regs(&newregs)) {
+ regs->uregs[offset] = data;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline int
+read_u32(struct task_struct *task, unsigned long addr, u32 *res)
+{
+ int ret;
+
+ ret = access_process_vm(task, addr, res, sizeof(*res), 0);
+
+ return ret == sizeof(*res) ? 0 : -EIO;
+}
+
+static inline int
+read_instr(struct task_struct *task, unsigned long addr, u32 *res)
+{
+ int ret;
+
+ if (addr & 1) {
+ u16 val;
+ ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
+ ret = ret == sizeof(val) ? 0 : -EIO;
+ *res = val;
+ } else {
+ u32 val;
+ ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
+ ret = ret == sizeof(val) ? 0 : -EIO;
+ *res = val;
+ }
+ return ret;
+}
+
+/*
+ * Get value of register `rn' (in the instruction)
+ */
+static unsigned long
+ptrace_getrn(struct task_struct *child, unsigned long insn)
+{
+ unsigned int reg = (insn >> 16) & 15;
+ unsigned long val;
+
+ val = get_user_reg(child, reg);
+ if (reg == 15)
+ val = pc_pointer(val + 8);
+
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in an ALU instruction)
+ */
+static unsigned long
+ptrace_getaluop2(struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+ if (insn & 1 << 25) {
+ val = insn & 255;
+ shift = (insn >> 8) & 15;
+ type = 3;
+ } else {
+ val = get_user_reg (child, insn & 15);
+
+ if (insn & (1 << 4))
+ shift = (int)get_user_reg (child, (insn >> 8) & 15);
+ else
+ shift = (insn >> 7) & 31;
+
+ type = (insn >> 5) & 3;
+ }
+
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ val = (val >> shift) | (val << (32 - shift));
+ break;
+ }
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in a LDR instruction)
+ */
+static unsigned long
+ptrace_getldrop2(struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+ val = get_user_reg(child, insn & 15);
+ shift = (insn >> 7) & 31;
+ type = (insn >> 5) & 3;
+
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ val = (val >> shift) | (val << (32 - shift));
+ break;
+ }
+ return val;
+}
+
+#define OP_MASK 0x01e00000
+#define OP_AND 0x00000000
+#define OP_EOR 0x00200000
+#define OP_SUB 0x00400000
+#define OP_RSB 0x00600000
+#define OP_ADD 0x00800000
+#define OP_ADC 0x00a00000
+#define OP_SBC 0x00c00000
+#define OP_RSC 0x00e00000
+#define OP_ORR 0x01800000
+#define OP_MOV 0x01a00000
+#define OP_BIC 0x01c00000
+#define OP_MVN 0x01e00000
+
+static unsigned long
+get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
+{
+ u32 alt = 0;
+
+ switch (insn & 0x0e000000) {
+ case 0x00000000:
+ case 0x02000000: {
+ /*
+ * data processing
+ */
+ long aluop1, aluop2, ccbit;
+
+ if ((insn & 0xf000) != 0xf000)
+ break;
+
+ aluop1 = ptrace_getrn(child, insn);
+ aluop2 = ptrace_getaluop2(child, insn);
+ ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
+
+ switch (insn & OP_MASK) {
+ case OP_AND: alt = aluop1 & aluop2; break;
+ case OP_EOR: alt = aluop1 ^ aluop2; break;
+ case OP_SUB: alt = aluop1 - aluop2; break;
+ case OP_RSB: alt = aluop2 - aluop1; break;
+ case OP_ADD: alt = aluop1 + aluop2; break;
+ case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
+		case OP_SBC:	alt = aluop1 - aluop2 + ccbit - 1; break;
+		case OP_RSC:	alt = aluop2 - aluop1 + ccbit - 1; break;
+ case OP_ORR: alt = aluop1 | aluop2; break;
+ case OP_MOV: alt = aluop2; break;
+ case OP_BIC: alt = aluop1 & ~aluop2; break;
+ case OP_MVN: alt = ~aluop2; break;
+ }
+ break;
+ }
+
+ case 0x04000000:
+ case 0x06000000:
+ /*
+ * ldr
+ */
+ if ((insn & 0x0010f000) == 0x0010f000) {
+ unsigned long base;
+
+ base = ptrace_getrn(child, insn);
+ if (insn & 1 << 24) {
+ long aluop2;
+
+ if (insn & 0x02000000)
+ aluop2 = ptrace_getldrop2(child, insn);
+ else
+ aluop2 = insn & 0xfff;
+
+ if (insn & 1 << 23)
+ base += aluop2;
+ else
+ base -= aluop2;
+ }
+ if (read_u32(child, base, &alt) == 0)
+ alt = pc_pointer(alt);
+ }
+ break;
+
+ case 0x08000000:
+ /*
+ * ldm
+ */
+ if ((insn & 0x00108000) == 0x00108000) {
+ unsigned long base;
+ unsigned int nr_regs;
+
+ if (insn & (1 << 23)) {
+ nr_regs = hweight16(insn & 65535) << 2;
+
+ if (!(insn & (1 << 24)))
+ nr_regs -= 4;
+ } else {
+ if (insn & (1 << 24))
+ nr_regs = -4;
+ else
+ nr_regs = 0;
+ }
+
+ base = ptrace_getrn(child, insn);
+
+ if (read_u32(child, base + nr_regs, &alt) == 0)
+ alt = pc_pointer(alt);
+ break;
+ }
+ break;
+
+ case 0x0a000000: {
+ /*
+ * bl or b
+ */
+ signed long displ;
+ /* It's a branch/branch link: instead of trying to
+ * figure out whether the branch will be taken or not,
+ * we'll put a breakpoint at both locations. This is
+ * simpler, more reliable, and probably not a whole lot
+ * slower than the alternative approach of emulating the
+ * branch.
+ */
+ displ = (insn & 0x00ffffff) << 8;
+ displ = (displ >> 6) + 8;
+ if (displ != 0 && displ != 4)
+ alt = pc + displ;
+ }
+ break;
+ }
+
+ return alt;
+}
+
+static int
+swap_insn(struct task_struct *task, unsigned long addr,
+ void *old_insn, void *new_insn, int size)
+{
+ int ret;
+
+ ret = access_process_vm(task, addr, old_insn, size, 0);
+ if (ret == size)
+ ret = access_process_vm(task, addr, new_insn, size, 1);
+ return ret;
+}
+
+static void
+add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
+{
+ int nr = dbg->nsaved;
+
+ if (nr < 2) {
+ u32 new_insn = BREAKINST_ARM;
+ int res;
+
+ res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
+
+ if (res == 4) {
+ dbg->bp[nr].address = addr;
+ dbg->nsaved += 1;
+ }
+ } else
+ printk(KERN_ERR "ptrace: too many breakpoints\n");
+}
+
+/*
+ * Clear one breakpoint in the user program. We copy what the hardware
+ * does and use bit 0 of the address to indicate whether this is a Thumb
+ * breakpoint or an ARM breakpoint.
+ */
+static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
+{
+ unsigned long addr = bp->address;
+ union debug_insn old_insn;
+ int ret;
+
+ if (addr & 1) {
+ ret = swap_insn(task, addr & ~1, &old_insn.thumb,
+ &bp->insn.thumb, 2);
+
+ if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
+ printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
+ "0x%08lx (0x%04x)\n", task->comm, task->pid,
+ addr, old_insn.thumb);
+ } else {
+ ret = swap_insn(task, addr & ~3, &old_insn.arm,
+ &bp->insn.arm, 4);
+
+ if (ret != 4 || old_insn.arm != BREAKINST_ARM)
+ printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
+ "0x%08lx (0x%08x)\n", task->comm, task->pid,
+ addr, old_insn.arm);
+ }
+}
+
+void ptrace_set_bpt(struct task_struct *child)
+{
+ struct pt_regs *regs;
+ unsigned long pc;
+ u32 insn;
+ int res;
+
+ regs = get_user_regs(child);
+ pc = instruction_pointer(regs);
+
+ if (thumb_mode(regs)) {
+ printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
+ return;
+ }
+
+ res = read_instr(child, pc, &insn);
+ if (!res) {
+ struct debug_info *dbg = &child->thread.debug;
+ unsigned long alt;
+
+ dbg->nsaved = 0;
+
+ alt = get_branch_address(child, pc, insn);
+ if (alt)
+ add_breakpoint(child, dbg, alt);
+
+ /*
+ * Note that we ignore the result of setting the above
+ * breakpoint since it may fail. When it does, this is
+ * not so much an error, but a forewarning that we may
+ * be receiving a prefetch abort shortly.
+ *
+ * If we don't set this breakpoint here, then we can
+ * lose control of the thread during single stepping.
+ */
+ if (!alt || predicate(insn) != PREDICATE_ALWAYS)
+ add_breakpoint(child, dbg, pc + 4);
+ }
+}
+
+/*
+ * Ensure no single-step breakpoint is pending: restore the
+ * original instructions at any breakpoints that were set.
+ */
+void ptrace_cancel_bpt(struct task_struct *child)
+{
+ int i, nsaved = child->thread.debug.nsaved;
+
+ child->thread.debug.nsaved = 0;
+
+ if (nsaved > 2) {
+		printk(KERN_ERR "ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
+ nsaved = 2;
+ }
+
+ for (i = 0; i < nsaved; i++)
+ clear_breakpoint(child, &child->thread.debug.bp[i]);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ child->ptrace &= ~PT_SINGLESTEP;
+ ptrace_cancel_bpt(child);
+}
+
+/*
+ * Handle hitting a breakpoint.
+ */
+void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ ptrace_cancel_bpt(tsk);
+
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = (void __user *)instruction_pointer(regs);
+
+ force_sig_info(SIGTRAP, &info, tsk);
+}
+
+static int break_trap(struct pt_regs *regs, unsigned int instr)
+{
+ ptrace_break(current, regs);
+ return 0;
+}
+
+static struct undef_hook arm_break_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = 0x07f001f0,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = 0,
+ .fn = break_trap,
+};
+
+static struct undef_hook thumb_break_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = 0xde01,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = PSR_T_BIT,
+ .fn = break_trap,
+};
+
+static int __init ptrace_break_init(void)
+{
+ register_undef_hook(&arm_break_hook);
+ register_undef_hook(&thumb_break_hook);
+ return 0;
+}
+
+core_initcall(ptrace_break_init);
+
+/*
+ * Read the word at offset "off" into the "struct user". We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+ unsigned long __user *ret)
+{
+ unsigned long tmp;
+
+ if (off & 3 || off >= sizeof(struct user))
+ return -EIO;
+
+ tmp = 0;
+ if (off < sizeof(struct pt_regs))
+ tmp = get_user_reg(tsk, off >> 2);
+
+ return put_user(tmp, ret);
+}
+
+/*
+ * Write the word at offset "off" into "struct user". We
+ * actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+ unsigned long val)
+{
+ if (off & 3 || off >= sizeof(struct user))
+ return -EIO;
+
+ if (off >= sizeof(struct pt_regs))
+ return 0;
+
+ return put_user_reg(tsk, off >> 2, val);
+}
+
+/*
+ * Get all user integer registers.
+ */
+static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
+{
+ struct pt_regs *regs = get_user_regs(tsk);
+
+ return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * Set all user integer registers.
+ */
+static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
+{
+ struct pt_regs newregs;
+ int ret;
+
+ ret = -EFAULT;
+ if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
+ struct pt_regs *regs = get_user_regs(tsk);
+
+ ret = -EINVAL;
+ if (valid_user_regs(&newregs)) {
+ *regs = newregs;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Get the child FPU state.
+ */
+static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
+{
+ return copy_to_user(ufp, &tsk->thread_info->fpstate,
+ sizeof(struct user_fp)) ? -EFAULT : 0;
+}
+
+/*
+ * Set the child FPU state.
+ */
+static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = tsk->thread_info;
+ thread->used_cp[1] = thread->used_cp[2] = 1;
+ return copy_from_user(&thread->fpstate, ufp,
+ sizeof(struct user_fp)) ? -EFAULT : 0;
+}
+
+#ifdef CONFIG_IWMMXT
+
+/*
+ * Get the child iWMMXt state.
+ */
+static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = tsk->thread_info;
+ void *ptr = &thread->fpstate;
+
+ if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
+ return -ENODATA;
+ iwmmxt_task_disable(thread); /* force it to ram */
+ /* The iWMMXt state is stored doubleword-aligned. */
+ if (((long) ptr) & 4)
+ ptr += 4;
+ return copy_to_user(ufp, ptr, 0x98) ? -EFAULT : 0;
+}
+
+/*
+ * Set the child iWMMXt state.
+ */
+static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
+{
+ struct thread_info *thread = tsk->thread_info;
+ void *ptr = &thread->fpstate;
+
+ if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
+ return -EACCES;
+ iwmmxt_task_release(thread); /* force a reload */
+ /* The iWMMXt state is stored doubleword-aligned. */
+ if (((long) ptr) & 4)
+ ptr += 4;
+ return copy_from_user(ptr, ufp, 0x98) ? -EFAULT : 0;
+}
+
+#endif
+
+static int do_ptrace(int request, struct task_struct *child, long addr, long data)
+{
+ unsigned long tmp;
+ int ret;
+
+ switch (request) {
+ /*
+ * read word at location "addr" in the child process.
+ */
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA:
+ ret = access_process_vm(child, addr, &tmp,
+ sizeof(unsigned long), 0);
+ if (ret == sizeof(unsigned long))
+ ret = put_user(tmp, (unsigned long __user *) data);
+ else
+ ret = -EIO;
+ break;
+
+ case PTRACE_PEEKUSR:
+ ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
+ break;
+
+ /*
+ * write the word at location addr.
+ */
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA:
+ ret = access_process_vm(child, addr, &data,
+ sizeof(unsigned long), 1);
+ if (ret == sizeof(unsigned long))
+ ret = 0;
+ else
+ ret = -EIO;
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = ptrace_write_user(child, addr, data);
+ break;
+
+ /*
+ * continue/restart and stop at next (return from) syscall
+ */
+ case PTRACE_SYSCALL:
+ case PTRACE_CONT:
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* make sure single-step breakpoint is gone. */
+ child->ptrace &= ~PT_SINGLESTEP;
+ ptrace_cancel_bpt(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ /*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL:
+ /* make sure single-step breakpoint is gone. */
+ child->ptrace &= ~PT_SINGLESTEP;
+ ptrace_cancel_bpt(child);
+ if (child->exit_state != EXIT_ZOMBIE) {
+ child->exit_code = SIGKILL;
+ wake_up_process(child);
+ }
+ ret = 0;
+ break;
+
+ /*
+ * execute single instruction.
+ */
+ case PTRACE_SINGLESTEP:
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ child->ptrace |= PT_SINGLESTEP;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ case PTRACE_DETACH:
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs(child, (void __user *)data);
+ break;
+
+#ifdef CONFIG_IWMMXT
+ case PTRACE_GETWMMXREGS:
+ ret = ptrace_getwmmxregs(child, (void __user *)data);
+ break;
+
+ case PTRACE_SETWMMXREGS:
+ ret = ptrace_setwmmxregs(child, (void __user *)data);
+ break;
+#endif
+
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(child->thread_info->tp_value,
+ (unsigned long __user *) data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret == 0)
+ ret = do_ptrace(request, child, addr, data);
+
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
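
A hedged userspace sketch exercising this entry point: trace a child across exec and peek one word of its text (0x8000 is merely the traditional ARM text base, used here as an example address).

/* Hedged userspace sketch -- error handling kept minimal. */
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* child */
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		execl("/bin/true", "true", (char *)0);
		_exit(1);
	}
	wait(0);				/* child stops on exec */
	printf("text word: %08lx\n",
	       ptrace(PTRACE_PEEKTEXT, pid, (void *)0x8000, 0));
	ptrace(PTRACE_CONT, pid, 0, 0);
	return 0;
}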
+
+asmlinkage void syscall_trace(int why, struct pt_regs *regs)
+{
+ unsigned long ip;
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+
+ /*
+ * Save IP. IP is used to denote syscall entry/exit:
+ * IP = 0 -> entry, = 1 -> exit
+ */
+ ip = regs->ARM_ip;
+ regs->ARM_ip = why;
+
+ /* the 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+ regs->ARM_ip = ip;
+}
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
new file mode 100644
index 00000000000..f7cad13a22e
--- /dev/null
+++ b/arch/arm/kernel/ptrace.h
@@ -0,0 +1,12 @@
+/*
+ * linux/arch/arm/kernel/ptrace.h
+ *
+ * Copyright (C) 2000-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+extern void ptrace_cancel_bpt(struct task_struct *);
+extern void ptrace_set_bpt(struct task_struct *);
+extern void ptrace_break(struct task_struct *, struct pt_regs *);
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
new file mode 100644
index 00000000000..ac423e3e224
--- /dev/null
+++ b/arch/arm/kernel/semaphore.c
@@ -0,0 +1,220 @@
+/*
+ * ARM semaphore implementation, taken from
+ *
+ * i386 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Modified for ARM by Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/semaphore.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to acquire the semaphore, while the "sleeping"
+ * variable is a count of such acquires.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * "sleeping" and the contention routine ordering is
+ * protected by the semaphore spinlock.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+
+/*
+ * Logic:
+ * - only on a boundary condition do we need to care. When we go
+ * from a negative count to a non-negative, we wake people up.
+ * - when we go from a non-negative count to a negative do we
+ * (a) synchronize with the "sleeper" count and (b) make sure
+ * that we're on the wakeup list before we synchronize so that
+ * we cannot lose wakeup events.
+ */
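
For contrast with the slow paths below, a hedged sketch of the fast-path inlines the comment alludes to; the real ones live in <asm/semaphore.h> and differ in detail.

/* Hedged sketch of the fast paths -- simplified, not the real inlines. */
static inline void down_sketch(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);		/* contention: slow path below */
}

static inline void up_sketch(struct semaphore *sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);		/* a sleeper needs waking */
}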
+
+void __up(struct semaphore *sem)
+{
+ wake_up(&sem->wait);
+}
+
+static DEFINE_SPINLOCK(semaphore_lock);
+
+void __sched __down(struct semaphore * sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ spin_lock_irq(&semaphore_lock);
+ sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irq(&semaphore_lock);
+
+ schedule();
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ spin_lock_irq(&semaphore_lock);
+ }
+ spin_unlock_irq(&semaphore_lock);
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+ wake_up(&sem->wait);
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ int retval = 0;
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ tsk->state = TASK_INTERRUPTIBLE;
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ spin_lock_irq(&semaphore_lock);
+	sem->sleepers++;
+ for (;;) {
+ int sleepers = sem->sleepers;
+
+ /*
+ * With signals pending, this turns into
+ * the trylock failure case - we won't be
+ * sleeping, and we can't get the lock as
+ * it has contention. Just correct the count
+ * and exit.
+ */
+ if (signal_pending(current)) {
+ retval = -EINTR;
+ sem->sleepers = 0;
+ atomic_add(sleepers, &sem->count);
+ break;
+ }
+
+ /*
+ * Add "everybody else" into it. They aren't
+ * playing, because we own the spinlock. The
+ * "-1" is because we're still hoping to get
+ * the lock.
+ */
+ if (!atomic_add_negative(sleepers - 1, &sem->count)) {
+ sem->sleepers = 0;
+ break;
+ }
+ sem->sleepers = 1; /* us - see -1 above */
+ spin_unlock_irq(&semaphore_lock);
+
+ schedule();
+ tsk->state = TASK_INTERRUPTIBLE;
+ spin_lock_irq(&semaphore_lock);
+ }
+ spin_unlock_irq(&semaphore_lock);
+ tsk->state = TASK_RUNNING;
+ remove_wait_queue(&sem->wait, &wait);
+ wake_up(&sem->wait);
+ return retval;
+}
+
+/*
+ * Trylock failed - make sure we correct for
+ * having decremented the count.
+ *
+ * We could have done the trylock with a
+ * single "cmpxchg" without failure cases,
+ * but then it wouldn't work on a 386.
+ */
+int __down_trylock(struct semaphore * sem)
+{
+ int sleepers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&semaphore_lock, flags);
+ sleepers = sem->sleepers + 1;
+ sem->sleepers = 0;
+
+ /*
+ * Add "everybody else" and us into it. They aren't
+ * playing, because we own the spinlock.
+ */
+ if (!atomic_add_negative(sleepers, &sem->count))
+ wake_up(&sem->wait);
+
+ spin_unlock_irqrestore(&semaphore_lock, flags);
+ return 1;
+}
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allow us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ *
+ * ip contains the semaphore pointer on entry. Save the C-clobbered
+ * registers (r0 to r3 and lr), but not ip, as we use it as a return
+ * value in some cases..
+ */
+asm(" .section .sched.text,\"ax\" \n\
+ .align 5 \n\
+ .globl __down_failed \n\
+__down_failed: \n\
+ stmfd sp!, {r0 - r3, lr} \n\
+ mov r0, ip \n\
+ bl __down \n\
+ ldmfd sp!, {r0 - r3, pc} \n\
+ \n\
+ .align 5 \n\
+ .globl __down_interruptible_failed \n\
+__down_interruptible_failed: \n\
+ stmfd sp!, {r0 - r3, lr} \n\
+ mov r0, ip \n\
+ bl __down_interruptible \n\
+ mov ip, r0 \n\
+ ldmfd sp!, {r0 - r3, pc} \n\
+ \n\
+ .align 5 \n\
+ .globl __down_trylock_failed \n\
+__down_trylock_failed: \n\
+ stmfd sp!, {r0 - r3, lr} \n\
+ mov r0, ip \n\
+ bl __down_trylock \n\
+ mov ip, r0 \n\
+ ldmfd sp!, {r0 - r3, pc} \n\
+ \n\
+ .align 5 \n\
+ .globl __up_wakeup \n\
+__up_wakeup: \n\
+ stmfd sp!, {r0 - r3, lr} \n\
+ mov r0, ip \n\
+ bl __up \n\
+ ldmfd sp!, {r0 - r3, pc} \n\
+ ");
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_interruptible_failed);
+EXPORT_SYMBOL(__down_trylock_failed);
+EXPORT_SYMBOL(__up_wakeup);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
new file mode 100644
index 00000000000..c2a7da3ac0f
--- /dev/null
+++ b/arch/arm/kernel/setup.c
@@ -0,0 +1,875 @@
+/*
+ * linux/arch/arm/kernel/setup.c
+ *
+ * Copyright (C) 1995-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/elf.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/procinfo.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
+
+#ifndef MEM_SIZE
+#define MEM_SIZE (16*1024*1024)
+#endif
+
+#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
+char fpe_type[8];
+
+static int __init fpe_setup(char *line)
+{
+ memcpy(fpe_type, line, 8);
+ return 1;
+}
+
+__setup("fpe=", fpe_setup);
+#endif
+
+extern unsigned int mem_fclk_21285;
+extern void paging_init(struct meminfo *, struct machine_desc *desc);
+extern void convert_to_tag_list(struct tag *tags);
+extern void squash_mem_tags(struct tag *tag);
+extern void reboot_setup(char *str);
+extern int root_mountflags;
+extern void _stext, _text, _etext, __data_start, _edata, _end;
+
+unsigned int processor_id;
+unsigned int __machine_arch_type;
+EXPORT_SYMBOL(__machine_arch_type);
+
+unsigned int system_rev;
+EXPORT_SYMBOL(system_rev);
+
+unsigned int system_serial_low;
+EXPORT_SYMBOL(system_serial_low);
+
+unsigned int system_serial_high;
+EXPORT_SYMBOL(system_serial_high);
+
+unsigned int elf_hwcap;
+EXPORT_SYMBOL(elf_hwcap);
+
+
+#ifdef MULTI_CPU
+struct processor processor;
+#endif
+#ifdef MULTI_TLB
+struct cpu_tlb_fns cpu_tlb;
+#endif
+#ifdef MULTI_USER
+struct cpu_user_fns cpu_user;
+#endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
+
+char elf_platform[ELF_PLATFORM_SIZE];
+EXPORT_SYMBOL(elf_platform);
+
+unsigned long phys_initrd_start __initdata = 0;
+unsigned long phys_initrd_size __initdata = 0;
+
+static struct meminfo meminfo __initdata = { 0, };
+static const char *cpu_name;
+static const char *machine_name;
+static char command_line[COMMAND_LINE_SIZE];
+
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
+#define ENDIANNESS ((char)endian_test.l)
+
+DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
+
+/*
+ * Standard memory resources
+ */
+static struct resource mem_res[] = {
+ { "Video RAM", 0, 0, IORESOURCE_MEM },
+ { "Kernel text", 0, 0, IORESOURCE_MEM },
+ { "Kernel data", 0, 0, IORESOURCE_MEM }
+};
+
+#define video_ram mem_res[0]
+#define kernel_code mem_res[1]
+#define kernel_data mem_res[2]
+
+static struct resource io_res[] = {
+ { "reserved", 0x3bc, 0x3be, IORESOURCE_IO | IORESOURCE_BUSY },
+ { "reserved", 0x378, 0x37f, IORESOURCE_IO | IORESOURCE_BUSY },
+ { "reserved", 0x278, 0x27f, IORESOURCE_IO | IORESOURCE_BUSY }
+};
+
+#define lp0 io_res[0]
+#define lp1 io_res[1]
+#define lp2 io_res[2]
+
+static const char *cache_types[16] = {
+ "write-through",
+ "write-back",
+ "write-back",
+ "undefined 3",
+ "undefined 4",
+ "undefined 5",
+ "write-back",
+ "write-back",
+ "undefined 8",
+ "undefined 9",
+ "undefined 10",
+ "undefined 11",
+ "undefined 12",
+ "undefined 13",
+ "write-back",
+ "undefined 15",
+};
+
+static const char *cache_clean[16] = {
+ "not required",
+ "read-block",
+ "cp15 c7 ops",
+ "undefined 3",
+ "undefined 4",
+ "undefined 5",
+ "cp15 c7 ops",
+ "cp15 c7 ops",
+ "undefined 8",
+ "undefined 9",
+ "undefined 10",
+ "undefined 11",
+ "undefined 12",
+ "undefined 13",
+ "cp15 c7 ops",
+ "undefined 15",
+};
+
+static const char *cache_lockdown[16] = {
+ "not supported",
+ "not supported",
+ "not supported",
+ "undefined 3",
+ "undefined 4",
+ "undefined 5",
+ "format A",
+ "format B",
+ "undefined 8",
+ "undefined 9",
+ "undefined 10",
+ "undefined 11",
+ "undefined 12",
+ "undefined 13",
+ "format C",
+ "undefined 15",
+};
+
+static const char *proc_arch[] = {
+ "undefined/unknown",
+ "3",
+ "4",
+ "4T",
+ "5",
+ "5T",
+ "5TE",
+ "5TEJ",
+ "6TEJ",
+ "?(10)",
+ "?(11)",
+ "?(12)",
+ "?(13)",
+ "?(14)",
+ "?(15)",
+ "?(16)",
+ "?(17)",
+};
+
+#define CACHE_TYPE(x) (((x) >> 25) & 15)
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */
+#define CACHE_ISIZE(x) ((x) & 4095)
+
+#define CACHE_SIZE(y) (((y) >> 6) & 7)
+#define CACHE_ASSOC(y) (((y) >> 3) & 7)
+#define CACHE_M(y) ((y) & (1 << 2))
+#define CACHE_LINE(y) ((y) & 3)
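+
+/*
+ * Worked example (hypothetical field values): with M = 0 the size
+ * multiplier used below is 2, so a size field of 5 means a
+ * 2 << (8 + 5) = 16K cache; with M = 1 the multiplier is 3, giving
+ * 3 << (8 + 5) = 24K.
+ */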
+
+static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
+{
+ unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
+
+ printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
+ cpu, prefix,
+ mult << (8 + CACHE_SIZE(cache)),
+ (mult << CACHE_ASSOC(cache)) >> 1,
+ 8 << CACHE_LINE(cache),
+ 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
+ CACHE_LINE(cache)));
+}
+
+static void __init dump_cpu_info(int cpu)
+{
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ if (info != processor_id) {
+ printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
+ cache_types[CACHE_TYPE(info)]);
+ if (CACHE_S(info)) {
+ dump_cache("I cache", cpu, CACHE_ISIZE(info));
+ dump_cache("D cache", cpu, CACHE_DSIZE(info));
+ } else {
+ dump_cache("cache", cpu, CACHE_ISIZE(info));
+ }
+ }
+}
+
+int cpu_architecture(void)
+{
+ int cpu_arch;
+
+ if ((processor_id & 0x0000f000) == 0) {
+ cpu_arch = CPU_ARCH_UNKNOWN;
+ } else if ((processor_id & 0x0000f000) == 0x00007000) {
+ cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
+ } else {
+ cpu_arch = (processor_id >> 16) & 7;
+ if (cpu_arch)
+ cpu_arch += CPU_ARCH_ARMv3;
+ }
+
+ return cpu_arch;
+}
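+
+/*
+ * Example (assumed ID value): an ARM920T reports processor_id
+ * 0x41129200. Bits [15:12] are neither 0 nor 7, so the architecture
+ * field is (0x41129200 >> 16) & 7 = 2, and 2 + CPU_ARCH_ARMv3 gives
+ * CPU_ARCH_ARMv4T.
+ */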
+
+/*
+ * These functions re-use the assembly code in head.S, which
+ * already provides the required functionality.
+ */
+extern struct proc_info_list *lookup_processor_type(void);
+extern struct machine_desc *lookup_machine_type(unsigned int);
+
+static void __init setup_processor(void)
+{
+ struct proc_info_list *list;
+
+ /*
+ * locate processor in the list of supported processor
+ * types. The linker builds this table for us from the
+ * entries in arch/arm/mm/proc-*.S
+ */
+ list = lookup_processor_type();
+ if (!list) {
+ printk("CPU configuration botched (ID %08x), unable "
+ "to continue.\n", processor_id);
+ while (1);
+ }
+
+ cpu_name = list->cpu_name;
+
+#ifdef MULTI_CPU
+ processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+ cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+ cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+ cpu_cache = *list->cache;
+#endif
+
+ printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
+ cpu_name, processor_id, (int)processor_id & 15,
+ proc_arch[cpu_architecture()]);
+
+ dump_cpu_info(smp_processor_id());
+
+ sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
+ sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+ elf_hwcap = list->elf_hwcap;
+
+ cpu_proc_init();
+}
+
+static struct machine_desc * __init setup_machine(unsigned int nr)
+{
+ struct machine_desc *list;
+
+ /*
+ * locate machine in the list of supported machines.
+ */
+ list = lookup_machine_type(nr);
+ if (!list) {
+ printk("Machine configuration botched (nr %d), unable "
+ "to continue.\n", nr);
+ while (1);
+ }
+
+ printk("Machine: %s\n", list->name);
+
+ return list;
+}
+
+static void __init early_initrd(char **p)
+{
+ unsigned long start, size;
+
+ start = memparse(*p, p);
+ if (**p == ',') {
+ size = memparse((*p) + 1, p);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+}
+__early_param("initrd=", early_initrd);
+
+/*
+ * Pick out the memory size. We look for mem=size@start,
+ * where start and size are "size[KkMm]"
+ */
+static void __init early_mem(char **p)
+{
+ static int usermem __initdata = 0;
+ unsigned long size, start;
+
+ /*
+ * If the user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ meminfo.nr_banks = 0;
+ }
+
+ start = PHYS_OFFSET;
+ size = memparse(*p, p);
+ if (**p == '@')
+ start = memparse(*p + 1, p);
+
+ meminfo.bank[meminfo.nr_banks].start = start;
+ meminfo.bank[meminfo.nr_banks].size = size;
+ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
+ meminfo.nr_banks += 1;
+}
+__early_param("mem=", early_mem);
+
+/*
+ * Initial parsing of the command line.
+ */
+static void __init parse_cmdline(char **cmdline_p, char *from)
+{
+ char c = ' ', *to = command_line;
+ int len = 0;
+
+ for (;;) {
+ if (c == ' ') {
+ extern struct early_params __early_begin, __early_end;
+ struct early_params *p;
+
+ for (p = &__early_begin; p < &__early_end; p++) {
+ int len = strlen(p->arg);
+
+ if (memcmp(from, p->arg, len) == 0) {
+ if (to != command_line)
+ to -= 1;
+ from += len;
+ p->fn(&from);
+
+ while (*from != ' ' && *from != '\0')
+ from++;
+ break;
+ }
+ }
+ }
+ c = *from++;
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *to++ = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+}
+
+static void __init
+setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
+{
+#ifdef CONFIG_BLK_DEV_RAM
+ extern int rd_size, rd_image_start, rd_prompt, rd_doload;
+
+ rd_image_start = image_start;
+ rd_prompt = prompt;
+ rd_doload = doload;
+
+ if (rd_sz)
+ rd_size = rd_sz;
+#endif
+}
+
+static void __init
+request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
+{
+ struct resource *res;
+ int i;
+
+ kernel_code.start = virt_to_phys(&_text);
+ kernel_code.end = virt_to_phys(&_etext - 1);
+ kernel_data.start = virt_to_phys(&__data_start);
+ kernel_data.end = virt_to_phys(&_end - 1);
+
+ for (i = 0; i < mi->nr_banks; i++) {
+ unsigned long virt_start, virt_end;
+
+ if (mi->bank[i].size == 0)
+ continue;
+
+ virt_start = __phys_to_virt(mi->bank[i].start);
+ virt_end = virt_start + mi->bank[i].size - 1;
+
+ res = alloc_bootmem_low(sizeof(*res));
+ res->name = "System RAM";
+ res->start = __virt_to_phys(virt_start);
+ res->end = __virt_to_phys(virt_end);
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ request_resource(&iomem_resource, res);
+
+ if (kernel_code.start >= res->start &&
+ kernel_code.end <= res->end)
+ request_resource(res, &kernel_code);
+ if (kernel_data.start >= res->start &&
+ kernel_data.end <= res->end)
+ request_resource(res, &kernel_data);
+ }
+
+ if (mdesc->video_start) {
+ video_ram.start = mdesc->video_start;
+ video_ram.end = mdesc->video_end;
+ request_resource(&iomem_resource, &video_ram);
+ }
+
+ /*
+	 * Some machines can never have lp0, lp1 or lp2, so their
+	 * ports are only reserved when the machine description asks
+ */
+ if (mdesc->reserve_lp0)
+ request_resource(&ioport_resource, &lp0);
+ if (mdesc->reserve_lp1)
+ request_resource(&ioport_resource, &lp1);
+ if (mdesc->reserve_lp2)
+ request_resource(&ioport_resource, &lp2);
+}
+
+/*
+ * Tag parsing.
+ *
+ * This is the new way of passing data to the kernel at boot time. Rather
+ * than passing a fixed inflexible structure to the kernel, we pass a list
+ * of variable-sized tags to the kernel. The first tag must be an ATAG_CORE
+ * tag for the list to be recognised (to distinguish the tagged list from
+ * a param_struct). The list is terminated with a zero-length tag (this tag
+ * is not parsed in any way).
+ */
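+/*
+ * A minimal tag list therefore looks like this in memory (compare
+ * the init_tags defaults further down):
+ *
+ *	{ tag_size(tag_core),  ATAG_CORE }	header
+ *	{ flags, pagesize, rootdev }		tag_core body
+ *	{ tag_size(tag_mem32), ATAG_MEM }	header
+ *	{ size, start }				tag_mem32 body
+ *	{ 0, ATAG_NONE }			zero-length terminator
+ */
+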
+static int __init parse_tag_core(const struct tag *tag)
+{
+ if (tag->hdr.size > 2) {
+ if ((tag->u.core.flags & 1) == 0)
+ root_mountflags &= ~MS_RDONLY;
+ ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
+ }
+ return 0;
+}
+
+__tagtable(ATAG_CORE, parse_tag_core);
+
+static int __init parse_tag_mem32(const struct tag *tag)
+{
+ if (meminfo.nr_banks >= NR_BANKS) {
+ printk(KERN_WARNING
+ "Ignoring memory bank 0x%08x size %dKB\n",
+ tag->u.mem.start, tag->u.mem.size / 1024);
+ return -EINVAL;
+ }
+ meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
+ meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size;
+ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start);
+ meminfo.nr_banks += 1;
+
+ return 0;
+}
+
+__tagtable(ATAG_MEM, parse_tag_mem32);
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+struct screen_info screen_info = {
+ .orig_video_lines = 30,
+ .orig_video_cols = 80,
+ .orig_video_mode = 0,
+ .orig_video_ega_bx = 0,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 8
+};
+
+static int __init parse_tag_videotext(const struct tag *tag)
+{
+ screen_info.orig_x = tag->u.videotext.x;
+ screen_info.orig_y = tag->u.videotext.y;
+ screen_info.orig_video_page = tag->u.videotext.video_page;
+ screen_info.orig_video_mode = tag->u.videotext.video_mode;
+ screen_info.orig_video_cols = tag->u.videotext.video_cols;
+ screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
+ screen_info.orig_video_lines = tag->u.videotext.video_lines;
+ screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
+ screen_info.orig_video_points = tag->u.videotext.video_points;
+ return 0;
+}
+
+__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
+#endif
+
+static int __init parse_tag_ramdisk(const struct tag *tag)
+{
+ setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
+ (tag->u.ramdisk.flags & 2) == 0,
+ tag->u.ramdisk.start, tag->u.ramdisk.size);
+ return 0;
+}
+
+__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+ printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+ "please update your bootloader.\n");
+ phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+ phys_initrd_size = tag->u.initrd.size;
+ return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+ phys_initrd_start = tag->u.initrd.start;
+ phys_initrd_size = tag->u.initrd.size;
+ return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
+
+static int __init parse_tag_serialnr(const struct tag *tag)
+{
+ system_serial_low = tag->u.serialnr.low;
+ system_serial_high = tag->u.serialnr.high;
+ return 0;
+}
+
+__tagtable(ATAG_SERIAL, parse_tag_serialnr);
+
+static int __init parse_tag_revision(const struct tag *tag)
+{
+ system_rev = tag->u.revision.rev;
+ return 0;
+}
+
+__tagtable(ATAG_REVISION, parse_tag_revision);
+
+static int __init parse_tag_cmdline(const struct tag *tag)
+{
+ strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
+ return 0;
+}
+
+__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
+
+/*
+ * Scan the tag table for this tag, and call its parse function.
+ * The tag table is built by the linker from all the __tagtable
+ * declarations.
+ */
+static int __init parse_tag(const struct tag *tag)
+{
+ extern struct tagtable __tagtable_begin, __tagtable_end;
+ struct tagtable *t;
+
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++)
+ if (tag->hdr.tag == t->tag) {
+ t->parse(tag);
+ break;
+ }
+
+ return t < &__tagtable_end;
+}
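+
+/*
+ * A board port can hook its own tag the same way; a sketch with a
+ * made-up ATAG_MYBOARD value and handler:
+ *
+ *	static int __init parse_tag_myboard(const struct tag *tag)
+ *	{
+ *		... decode the board-defined payload ...
+ *		return 0;
+ *	}
+ *	__tagtable(ATAG_MYBOARD, parse_tag_myboard);
+ */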
+
+/*
+ * Parse all tags in the list, checking both the global and architecture
+ * specific tag tables.
+ */
+static void __init parse_tags(const struct tag *t)
+{
+ for (; t->hdr.size; t = tag_next(t))
+ if (!parse_tag(t))
+ printk(KERN_WARNING
+ "Ignoring unrecognised tag 0x%08x\n",
+ t->hdr.tag);
+}
+
+/*
+ * This holds our defaults.
+ */
+static struct init_tags {
+ struct tag_header hdr1;
+ struct tag_core core;
+ struct tag_header hdr2;
+ struct tag_mem32 mem;
+ struct tag_header hdr3;
+} init_tags __initdata = {
+ { tag_size(tag_core), ATAG_CORE },
+ { 1, PAGE_SIZE, 0xff },
+ { tag_size(tag_mem32), ATAG_MEM },
+ { MEM_SIZE, PHYS_OFFSET },
+ { 0, ATAG_NONE }
+};
+
+static void (*init_machine)(void) __initdata;
+
+static int __init customize_machine(void)
+{
+ /* customizes platform devices, or adds new ones */
+ if (init_machine)
+ init_machine();
+ return 0;
+}
+arch_initcall(customize_machine);
+
+void __init setup_arch(char **cmdline_p)
+{
+ struct tag *tags = (struct tag *)&init_tags;
+ struct machine_desc *mdesc;
+ char *from = default_command_line;
+
+ setup_processor();
+ mdesc = setup_machine(machine_arch_type);
+ machine_name = mdesc->name;
+
+ if (mdesc->soft_reboot)
+ reboot_setup("s");
+
+ if (mdesc->param_offset)
+ tags = phys_to_virt(mdesc->param_offset);
+
+ /*
+ * If we have the old style parameters, convert them to
+ * a tag list.
+ */
+ if (tags->hdr.tag != ATAG_CORE)
+ convert_to_tag_list(tags);
+ if (tags->hdr.tag != ATAG_CORE)
+ tags = (struct tag *)&init_tags;
+
+ if (mdesc->fixup)
+ mdesc->fixup(mdesc, tags, &from, &meminfo);
+
+ if (tags->hdr.tag == ATAG_CORE) {
+ if (meminfo.nr_banks != 0)
+ squash_mem_tags(tags);
+ parse_tags(tags);
+ }
+
+ init_mm.start_code = (unsigned long) &_text;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+ parse_cmdline(cmdline_p, from);
+ paging_init(&meminfo, mdesc);
+ request_standard_resources(&meminfo, mdesc);
+
+ /*
+ * Set up various architecture-specific pointers
+ */
+ init_arch_irq = mdesc->init_irq;
+ system_timer = mdesc->timer;
+ init_machine = mdesc->init_machine;
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+}
+
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu)
+ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+static const char *hwcap_str[] = {
+ "swp",
+ "half",
+ "thumb",
+ "26bit",
+ "fastmult",
+ "fpa",
+ "vfp",
+ "edsp",
+ "java",
+ NULL
+};
+
+static void
+c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
+{
+ unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
+
+ seq_printf(m, "%s size\t\t: %d\n"
+ "%s assoc\t\t: %d\n"
+ "%s line length\t: %d\n"
+ "%s sets\t\t: %d\n",
+ type, mult << (8 + CACHE_SIZE(cache)),
+ type, (mult << CACHE_ASSOC(cache)) >> 1,
+ type, 8 << CACHE_LINE(cache),
+ type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
+ CACHE_LINE(cache)));
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+ cpu_name, (int)processor_id & 15, elf_platform);
+
+#if defined(CONFIG_SMP)
+ for_each_online_cpu(i) {
+ seq_printf(m, "Processor\t: %d\n", i);
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
+ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+ }
+#else /* CONFIG_SMP */
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
+
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: ");
+
+ for (i = 0; hwcap_str[i]; i++)
+ if (elf_hwcap & (1 << i))
+ seq_printf(m, "%s ", hwcap_str[i]);
+
+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
+ seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
+
+ if ((processor_id & 0x0000f000) == 0x00000000) {
+ /* pre-ARM7 */
+ seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
+ } else {
+ if ((processor_id & 0x0000f000) == 0x00007000) {
+ /* ARM7 */
+ seq_printf(m, "CPU variant\t: 0x%02x\n",
+ (processor_id >> 16) & 127);
+ } else {
+ /* post-ARM7 */
+ seq_printf(m, "CPU variant\t: 0x%x\n",
+ (processor_id >> 20) & 15);
+ }
+ seq_printf(m, "CPU part\t: 0x%03x\n",
+ (processor_id >> 4) & 0xfff);
+ }
+ seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
+
+ {
+ unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
+ if (cache_info != processor_id) {
+ seq_printf(m, "Cache type\t: %s\n"
+ "Cache clean\t: %s\n"
+ "Cache lockdown\t: %s\n"
+ "Cache format\t: %s\n",
+ cache_types[CACHE_TYPE(cache_info)],
+ cache_clean[CACHE_TYPE(cache_info)],
+ cache_lockdown[CACHE_TYPE(cache_info)],
+ CACHE_S(cache_info) ? "Harvard" : "Unified");
+
+ if (CACHE_S(cache_info)) {
+ c_show_cache(m, "I", CACHE_ISIZE(cache_info));
+ c_show_cache(m, "D", CACHE_DSIZE(cache_info));
+ } else {
+ c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
+ }
+ }
+ }
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+ seq_printf(m, "Revision\t: %04x\n", system_rev);
+ seq_printf(m, "Serial\t\t: %08x%08x\n",
+ system_serial_high, system_serial_low);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
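+
+/*
+ * Sample /proc/cpuinfo output on a UP system (illustrative values
+ * only, cache lines omitted):
+ *
+ *	Processor	: ARM920T rev 0 (v4l)
+ *	BogoMIPS	: 99.73
+ *	Features	: swp half thumb
+ *	CPU implementer	: 0x41
+ *	CPU architecture: 4T
+ *	CPU variant	: 0x1
+ *	CPU part	: 0x920
+ *	CPU revision	: 0
+ *
+ *	Hardware	: <machine name>
+ *	Revision	: 0000
+ *	Serial		: 0000000000000000
+ */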
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
new file mode 100644
index 00000000000..931919fd512
--- /dev/null
+++ b/arch/arm/kernel/signal.c
@@ -0,0 +1,748 @@
+/*
+ * linux/arch/arm/kernel/signal.c
+ *
+ * Copyright (C) 1995-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include "ptrace.h"
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ */
+#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
+#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
+
+/*
+ * For Thumb syscalls, we pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
+#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
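+
+/*
+ * Worked example (assuming the ARM OABI syscall base 0x900000 and
+ * sigreturn = base + 119): SWI_SYS_SIGRETURN evaluates to
+ * 0xef900077, i.e. "swi 0x900077", and SWI_THUMB_SIGRETURN packs
+ * "mov r7, #119; swi 0" into the single word 0xdf002777.
+ */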
+
+static const unsigned long retcodes[4] = {
+ SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
+ SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
+};
+
+static int do_signal(sigset_t *oldset, struct pt_regs * regs, int syscall);
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs, 0))
+ return regs->ARM_r0;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs, 0))
+ return regs->ARM_r0;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_IWMMXT
+
+/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */
+#define IWMMXT_STORAGE_SIZE (0x98 + 8)
+#define IWMMXT_MAGIC0 0x12ef842a
+#define IWMMXT_MAGIC1 0x1c07ca71
+
+struct iwmmxt_sigframe {
+ unsigned long magic0;
+ unsigned long magic1;
+ unsigned long storage[0x98/4];
+};
+
+static int page_present(struct mm_struct *mm, void __user *uptr, int wr)
+{
+ unsigned long addr = (unsigned long)uptr;
+ pgd_t *pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ pmd_t *pmd = pmd_offset(pgd, addr);
+ if (pmd_present(*pmd)) {
+ pte_t *pte = pte_offset_map(pmd, addr);
+ return (pte_present(*pte) && (!wr || pte_write(*pte)));
+ }
+ }
+ return 0;
+}
+
+static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
+ void (*copyfn)(void *, void __user *))
+{
+ unsigned char v, __user *userptr = uptr;
+ int err = 0;
+
+ do {
+ struct mm_struct *mm;
+
+ if (write) {
+ __put_user_error(0, userptr, err);
+ __put_user_error(0, userptr + size - 1, err);
+ } else {
+ __get_user_error(v, userptr, err);
+ __get_user_error(v, userptr + size - 1, err);
+ }
+
+ if (err)
+ break;
+
+ mm = current->mm;
+ spin_lock(&mm->page_table_lock);
+ if (page_present(mm, userptr, write) &&
+ page_present(mm, userptr + size - 1, write)) {
+ copyfn(kptr, uptr);
+ } else
+ err = 1;
+ spin_unlock(&mm->page_table_lock);
+ } while (err);
+
+ return err;
+}
+
+static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
+{
+ int err = 0;
+
+ /* the iWMMXt context must be 64 bit aligned */
+ WARN_ON((unsigned long)frame & 7);
+
+ __put_user_error(IWMMXT_MAGIC0, &frame->magic0, err);
+ __put_user_error(IWMMXT_MAGIC1, &frame->magic1, err);
+
+ /*
+ * iwmmxt_task_copy() doesn't check user permissions.
+ * Let's do a dummy write on the upper boundary to ensure
+	 * access to user memory is OK all the way up.
+ */
+ err |= copy_locked(&frame->storage, current_thread_info(),
+ sizeof(frame->storage), 1, iwmmxt_task_copy);
+ return err;
+}
+
+static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+{
+ unsigned long magic0, magic1;
+ int err = 0;
+
+ /* the iWMMXt context is 64 bit aligned */
+ WARN_ON((unsigned long)frame & 7);
+
+ /*
+ * Validate iWMMXt context signature.
+ * Also, iwmmxt_task_restore() doesn't check user permissions.
+ * Let's do a dummy write on the upper boundary to ensure
+	 * access to user memory is OK all the way up.
+ */
+ __get_user_error(magic0, &frame->magic0, err);
+ __get_user_error(magic1, &frame->magic1, err);
+ if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
+ err = copy_locked(&frame->storage, current_thread_info(),
+ sizeof(frame->storage), 0, iwmmxt_task_restore);
+ return err;
+}
+
+#endif
+
+/*
+ * Auxiliary signal frame. This saves stuff like FP state.
+ * The layout of this structure is not part of the user ABI.
+ */
+struct aux_sigframe {
+#ifdef CONFIG_IWMMXT
+ struct iwmmxt_sigframe iwmmxt;
+#endif
+#ifdef CONFIG_VFP
+ union vfp_state vfp;
+#endif
+};
+
+/*
+ * Do a signal return; undo the signal stack. These are aligned to 64-bit.
+ */
+struct sigframe {
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned long retcode;
+ struct aux_sigframe aux __attribute__((aligned(8)));
+};
+
+struct rt_sigframe {
+ struct siginfo __user *pinfo;
+ void __user *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long retcode;
+ struct aux_sigframe aux __attribute__((aligned(8)));
+};
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ struct aux_sigframe __user *aux)
+{
+ int err = 0;
+
+ __get_user_error(regs->ARM_r0, &sc->arm_r0, err);
+ __get_user_error(regs->ARM_r1, &sc->arm_r1, err);
+ __get_user_error(regs->ARM_r2, &sc->arm_r2, err);
+ __get_user_error(regs->ARM_r3, &sc->arm_r3, err);
+ __get_user_error(regs->ARM_r4, &sc->arm_r4, err);
+ __get_user_error(regs->ARM_r5, &sc->arm_r5, err);
+ __get_user_error(regs->ARM_r6, &sc->arm_r6, err);
+ __get_user_error(regs->ARM_r7, &sc->arm_r7, err);
+ __get_user_error(regs->ARM_r8, &sc->arm_r8, err);
+ __get_user_error(regs->ARM_r9, &sc->arm_r9, err);
+ __get_user_error(regs->ARM_r10, &sc->arm_r10, err);
+ __get_user_error(regs->ARM_fp, &sc->arm_fp, err);
+ __get_user_error(regs->ARM_ip, &sc->arm_ip, err);
+ __get_user_error(regs->ARM_sp, &sc->arm_sp, err);
+ __get_user_error(regs->ARM_lr, &sc->arm_lr, err);
+ __get_user_error(regs->ARM_pc, &sc->arm_pc, err);
+ __get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
+
+ err |= !valid_user_regs(regs);
+
+#ifdef CONFIG_IWMMXT
+ if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+ err |= restore_iwmmxt_context(&aux->iwmmxt);
+#endif
+#ifdef CONFIG_VFP
+// if (err == 0)
+// err |= vfp_restore_state(&aux->vfp);
+#endif
+
+ return err;
+}
+
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+ struct sigframe __user *frame;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+	 * 'sp' should be 8-byte aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->ARM_sp & 7)
+ goto badframe;
+
+ frame = (struct sigframe __user *)regs->ARM_sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, &frame->aux))
+ goto badframe;
+
+ /* Send SIGTRAP if we're single-stepping */
+ if (current->ptrace & PT_SINGLESTEP) {
+ ptrace_cancel_bpt(current);
+ send_sig(SIGTRAP, current, 1);
+ }
+
+ return regs->ARM_r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+	 * 'sp' should be 8-byte aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->ARM_sp & 7)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *)regs->ARM_sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &frame->aux))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
+ goto badframe;
+
+ /* Send SIGTRAP if we're single-stepping */
+ if (current->ptrace & PT_SINGLESTEP) {
+ ptrace_cancel_bpt(current);
+ send_sig(SIGTRAP, current, 1);
+ }
+
+ return regs->ARM_r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct aux_sigframe __user *aux,
+ struct pt_regs *regs, unsigned long mask)
+{
+ int err = 0;
+
+ __put_user_error(regs->ARM_r0, &sc->arm_r0, err);
+ __put_user_error(regs->ARM_r1, &sc->arm_r1, err);
+ __put_user_error(regs->ARM_r2, &sc->arm_r2, err);
+ __put_user_error(regs->ARM_r3, &sc->arm_r3, err);
+ __put_user_error(regs->ARM_r4, &sc->arm_r4, err);
+ __put_user_error(regs->ARM_r5, &sc->arm_r5, err);
+ __put_user_error(regs->ARM_r6, &sc->arm_r6, err);
+ __put_user_error(regs->ARM_r7, &sc->arm_r7, err);
+ __put_user_error(regs->ARM_r8, &sc->arm_r8, err);
+ __put_user_error(regs->ARM_r9, &sc->arm_r9, err);
+ __put_user_error(regs->ARM_r10, &sc->arm_r10, err);
+ __put_user_error(regs->ARM_fp, &sc->arm_fp, err);
+ __put_user_error(regs->ARM_ip, &sc->arm_ip, err);
+ __put_user_error(regs->ARM_sp, &sc->arm_sp, err);
+ __put_user_error(regs->ARM_lr, &sc->arm_lr, err);
+ __put_user_error(regs->ARM_pc, &sc->arm_pc, err);
+ __put_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);
+
+ __put_user_error(current->thread.trap_no, &sc->trap_no, err);
+ __put_user_error(current->thread.error_code, &sc->error_code, err);
+ __put_user_error(current->thread.address, &sc->fault_address, err);
+ __put_user_error(mask, &sc->oldmask, err);
+
+#ifdef CONFIG_IWMMXT
+ if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+ err |= preserve_iwmmxt_context(&aux->iwmmxt);
+#endif
+#ifdef CONFIG_VFP
+// if (err == 0)
+// err |= vfp_save_state(&aux->vfp);
+#endif
+
+ return err;
+}
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize)
+{
+ unsigned long sp = regs->ARM_sp;
+ void __user *frame;
+
+ /*
+ * This is the X/Open sanctioned signal stack switching.
+ */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /*
+ * ATPCS B01 mandates 8-byte alignment
+ */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /*
+ * Check that we can actually write to the signal frame.
+ */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
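+
+/*
+ * Example (arbitrary values): with sp = 0xbefffd2c and a 0x168 byte
+ * frame, the frame lands at (0xbefffd2c - 0x168) & ~7 = 0xbefffbc0,
+ * which satisfies the 8-byte ATPCS alignment.
+ */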
+
+static int
+setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ unsigned long __user *rc, void __user *frame, int usig)
+{
+ unsigned long handler = (unsigned long)ka->sa.sa_handler;
+ unsigned long retcode;
+ int thumb = 0;
+ unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+
+ /*
+ * Maybe we need to deliver a 32-bit signal to a 26-bit task.
+ */
+ if (ka->sa.sa_flags & SA_THIRTYTWO)
+ cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
+
+#ifdef CONFIG_ARM_THUMB
+ if (elf_hwcap & HWCAP_THUMB) {
+ /*
+ * The LSB of the handler determines if we're going to
+ * be using THUMB or ARM mode for this signal handler.
+ */
+ thumb = handler & 1;
+
+ if (thumb)
+ cpsr |= PSR_T_BIT;
+ else
+ cpsr &= ~PSR_T_BIT;
+ }
+#endif
+
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = (unsigned long)ka->sa.sa_restorer;
+ } else {
+ unsigned int idx = thumb;
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ idx += 2;
+
+ if (__put_user(retcodes[idx], rc))
+ return 1;
+
+ /*
+ * Ensure that the instruction cache sees
+ * the return code written onto the stack.
+ */
+ flush_icache_range((unsigned long)rc,
+ (unsigned long)(rc + 1));
+
+ retcode = ((unsigned long)rc) + thumb;
+ }
+
+ regs->ARM_r0 = usig;
+ regs->ARM_sp = (unsigned long)frame;
+ regs->ARM_lr = retcode;
+ regs->ARM_pc = handler;
+ regs->ARM_cpsr = cpsr;
+
+ return 0;
+}
+
+static int
+setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ err |= setup_sigcontext(&frame->sc, &frame->aux, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ if (err == 0)
+ err = setup_return(regs, ka, &frame->retcode, frame, usig);
+
+ return err;
+}
+
+static int
+setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+ stack_t stack;
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ __put_user_error(&frame->info, &frame->pinfo, err);
+ __put_user_error(&frame->uc, &frame->puc, err);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ __put_user_error(0, &frame->uc.uc_flags, err);
+ __put_user_error(NULL, &frame->uc.uc_link, err);
+
+ memset(&stack, 0, sizeof(stack));
+ stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_flags = sas_ss_flags(regs->ARM_sp);
+ stack.ss_size = current->sas_ss_size;
+ err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
+
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->aux,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err == 0)
+ err = setup_return(regs, ka, &frame->retcode, frame, usig);
+
+ if (err == 0) {
+ /*
+ * For realtime signals we must also set the second and third
+ * arguments for the signal handler.
+ * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
+ */
+ regs->ARM_r1 = (unsigned long)&frame->info;
+ regs->ARM_r2 = (unsigned long)&frame->uc;
+ }
+
+ return err;
+}
+
+static inline void restart_syscall(struct pt_regs *regs)
+{
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs * regs, int syscall)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+ int usig = sig;
+ int ret;
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (syscall) {
+ switch (regs->ARM_r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->ARM_r0 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->ARM_r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ restart_syscall(regs);
+ }
+ }
+
+ /*
+ * translate the signal
+ */
+ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
+ usig = thread->exec_domain->signal_invmap[usig];
+
+ /*
+ * Set up the stack frame
+ */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = setup_rt_frame(usig, ka, info, oldset, regs);
+ else
+ ret = setup_frame(usig, ka, oldset, regs);
+
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+ ret |= !valid_user_regs(regs);
+
+ /*
+ * Block the signal if we were unsuccessful.
+ */
+ if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&tsk->sighand->siglock);
+ sigorsets(&tsk->blocked, &tsk->blocked,
+ &ka->sa.sa_mask);
+ sigaddset(&tsk->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
+ }
+
+ if (ret == 0)
+ return;
+
+ force_sigsegv(sig, tsk);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init, not even with SIGKILL,
+ * not even by mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 0;
+
+ if (try_to_freeze(0))
+ goto no_signal;
+
+ if (current->ptrace & PT_SINGLESTEP)
+ ptrace_cancel_bpt(current);
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ handle_signal(signr, &ka, &info, oldset, regs, syscall);
+ if (current->ptrace & PT_SINGLESTEP)
+ ptrace_set_bpt(current);
+ return 1;
+ }
+
+ no_signal:
+ /*
+ * No signal to deliver to the process - restart the syscall.
+ */
+ if (syscall) {
+ if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+ if (thumb_mode(regs)) {
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 2;
+ } else {
+ u32 __user *usp;
+
+ regs->ARM_sp -= 12;
+ usp = (u32 __user *)regs->ARM_sp;
+
+ put_user(regs->ARM_pc, &usp[0]);
+ /* swi __NR_restart_syscall */
+ put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+ /* ldr pc, [sp], #12 */
+ put_user(0xe49df00c, &usp[2]);
+
+ flush_icache_range((unsigned long)usp,
+ (unsigned long)(usp + 3));
+
+ regs->ARM_pc = regs->ARM_sp + 4;
+ }
+ }
+ if (regs->ARM_r0 == -ERESTARTNOHAND ||
+ regs->ARM_r0 == -ERESTARTSYS ||
+ regs->ARM_r0 == -ERESTARTNOINTR) {
+ restart_syscall(regs);
+ }
+ }
+ if (current->ptrace & PT_SINGLESTEP)
+ ptrace_set_bpt(current);
+ return 0;
+}
+
+asmlinkage void
+do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+{
+ if (thread_flags & _TIF_SIGPENDING)
+ do_signal(&current->blocked, regs, syscall);
+}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
new file mode 100644
index 00000000000..ecc8c333240
--- /dev/null
+++ b/arch/arm/kernel/smp.c
@@ -0,0 +1,396 @@
+/*
+ * linux/arch/arm/kernel/smp.c
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/tlbflush.h>
+#include <asm/ptrace.h>
+
+/*
+ * bitmask of present and online CPUs.
+ * The present bitmask indicates that the CPU is physically present.
+ * The online bitmask indicates that the CPU is up and running.
+ */
+cpumask_t cpu_present_mask;
+cpumask_t cpu_online_map;
+
+/*
+ * structures for inter-processor calls
+ * - A collection of single bit ipi messages.
+ */
+struct ipi_data {
+ spinlock_t lock;
+ unsigned long ipi_count;
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
+ .lock = SPIN_LOCK_UNLOCKED,
+};
+
+enum ipi_msg_type {
+ IPI_TIMER,
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+};
+
+struct smp_call_struct {
+ void (*func)(void *info);
+ void *info;
+ int wait;
+ cpumask_t pending;
+ cpumask_t unfinished;
+};
+
+static struct smp_call_struct * volatile smp_call_function_data;
+static DEFINE_SPINLOCK(smp_call_function_lock);
+
+int __init __cpu_up(unsigned int cpu)
+{
+ struct task_struct *idle;
+ int ret;
+
+ /*
+ * Spawn a new process manually. Grab a pointer to
+ * its task struct so we can mess with it
+ */
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle)) {
+ printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+ return PTR_ERR(idle);
+ }
+
+ /*
+ * Now bring the CPU into our world.
+ */
+ ret = boot_secondary(cpu, idle);
+ if (ret) {
+ printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
+ /*
+ * FIXME: We need to clean up the new idle thread. --rmk
+ */
+ }
+
+ return ret;
+}
+
+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+void __init smp_store_cpu_info(unsigned int cpuid)
+{
+ struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+ cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ int cpu;
+ unsigned long bogosum = 0;
+
+ for_each_online_cpu(cpu)
+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(),
+ bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_set(cpu, cpu_present_mask);
+ cpu_set(cpu, cpu_online_map);
+}
+
+static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+
+ for_each_cpu_mask(cpu, callmap) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+ spin_lock(&ipi->lock);
+ ipi->bits |= 1 << msg;
+ spin_unlock(&ipi->lock);
+ }
+
+ /*
+ * Call the platform specific cross-CPU call function.
+ */
+ smp_cross_call(callmap);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * You must not call this function with interrupts disabled, from a
+ * hardware interrupt handler, or from a bottom half handler.
+ */
+int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
+ int wait, cpumask_t callmap)
+{
+ struct smp_call_struct data;
+ unsigned long timeout;
+ int ret = 0;
+
+ data.func = func;
+ data.info = info;
+ data.wait = wait;
+
+ cpu_clear(smp_processor_id(), callmap);
+ if (cpus_empty(callmap))
+ goto out;
+
+ data.pending = callmap;
+ if (wait)
+ data.unfinished = callmap;
+
+ /*
+ * try to get the mutex on smp_call_function_data
+ */
+ spin_lock(&smp_call_function_lock);
+ smp_call_function_data = &data;
+
+ send_ipi_message(callmap, IPI_CALL_FUNC);
+
+ timeout = jiffies + HZ;
+ while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
+ barrier();
+
+ /*
+ * did we time out?
+ */
+ if (!cpus_empty(data.pending)) {
+ /*
+ * this may be causing our panic - report it
+ */
+ printk(KERN_CRIT
+ "CPU%u: smp_call_function timeout for %p(%p)\n"
+ " callmap %lx pending %lx, %swait\n",
+ smp_processor_id(), func, info, callmap, data.pending,
+ wait ? "" : "no ");
+
+ /*
+ * TRACE
+ */
+ timeout = jiffies + (5 * HZ);
+ while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
+ barrier();
+
+ if (cpus_empty(data.pending))
+ printk(KERN_CRIT " RESOLVED\n");
+ else
+ printk(KERN_CRIT " STILL STUCK\n");
+ }
+
+ /*
+ * whatever happened, we're done with the data, so release it
+ */
+ smp_call_function_data = NULL;
+ spin_unlock(&smp_call_function_lock);
+
+ if (!cpus_empty(data.pending)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (wait)
+ while (!cpus_empty(data.unfinished))
+ barrier();
+ out:
+
+	return ret;
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int retry,
+ int wait)
+{
+ return smp_call_function_on_cpu(func, info, retry, wait,
+ cpu_online_map);
+}
+
+void show_ipi_list(struct seq_file *p)
+{
+ unsigned int cpu;
+
+ seq_puts(p, "IPI:");
+
+ for_each_online_cpu(cpu)
+ seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+
+ seq_putc(p, '\n');
+}
+
+static void ipi_timer(struct pt_regs *regs)
+{
+ int user = user_mode(regs);
+
+ irq_enter();
+ profile_tick(CPU_PROFILING, regs);
+ update_process_times(user);
+ irq_exit();
+}
+
+/*
+ * ipi_call_function - handle IPI from smp_call_function()
+ *
+ * Note that we copy data out of the cross-call structure and then
+ * let the caller know that we're here and are done with their data
+ */
+static void ipi_call_function(unsigned int cpu)
+{
+ struct smp_call_struct *data = smp_call_function_data;
+ void (*func)(void *info) = data->func;
+ void *info = data->info;
+ int wait = data->wait;
+
+ cpu_clear(cpu, data->pending);
+
+ func(info);
+
+ if (wait)
+ cpu_clear(cpu, data->unfinished);
+}
+
+static DEFINE_SPINLOCK(stop_lock);
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ spin_lock(&stop_lock);
+ printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ dump_stack();
+ spin_unlock(&stop_lock);
+
+ cpu_clear(cpu, cpu_online_map);
+
+ local_fiq_disable();
+ local_irq_disable();
+
+ while (1)
+ cpu_relax();
+}
+
+/*
+ * Main handler for inter-processor interrupts
+ *
+ * For ARM, the ipimask now only identifies a single
+ * category of IPI (Bit 1 IPIs have been replaced by a
+ * different mechanism):
+ *
+ * Bit 0 - Inter-processor function call
+ */
+void do_IPI(struct pt_regs *regs)
+{
+ unsigned int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+
+ ipi->ipi_count++;
+
+ for (;;) {
+ unsigned long msgs;
+
+ spin_lock(&ipi->lock);
+ msgs = ipi->bits;
+ ipi->bits = 0;
+ spin_unlock(&ipi->lock);
+
+ if (!msgs)
+ break;
+
+ do {
+ unsigned nextmsg;
+
+ nextmsg = msgs & -msgs;
+ msgs &= ~nextmsg;
+ nextmsg = ffz(~nextmsg);
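+
+			/*
+			 * msgs & -msgs isolates the lowest set bit and
+			 * ffz(~nextmsg) turns it into a bit index:
+			 * e.g. msgs = 0x6 yields nextmsg = 0x2, then
+			 * index 1, i.e. IPI_RESCHEDULE.
+			 */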
+
+ switch (nextmsg) {
+ case IPI_TIMER:
+ ipi_timer(regs);
+ break;
+
+ case IPI_RESCHEDULE:
+ /*
+				 * nothing more to do - everything is
+ * done on the interrupt return path
+ */
+ break;
+
+ case IPI_CALL_FUNC:
+ ipi_call_function(cpu);
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+
+ default:
+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+ cpu, nextmsg);
+ break;
+ }
+ } while (msgs);
+ }
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_timer(void)
+{
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(smp_processor_id(), mask);
+ send_ipi_message(mask, IPI_TIMER);
+}
+
+void smp_send_stop(void)
+{
+ cpumask_t mask = cpu_online_map;
+ cpu_clear(smp_processor_id(), mask);
+ send_ipi_message(mask, IPI_CPU_STOP);
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
new file mode 100644
index 00000000000..c41dc605f12
--- /dev/null
+++ b/arch/arm/kernel/sys_arm.c
@@ -0,0 +1,332 @@
+/*
+ * linux/arch/arm/kernel/sys_arm.c
+ *
+ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/arm
+ * platform.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
+ unsigned long new_len, unsigned long flags,
+ unsigned long new_addr);
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long __user *fildes)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define MIN_MAP_ADDR (PAGE_SIZE)
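+
+/*
+ * (On CPUs without high vectors the exception vector table lives at
+ * virtual address 0, so a user mapping of the first page could
+ * shadow the vectors; hence the PAGE_SIZE floor above.)
+ */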
+
+/* common code for old and new mmaps */
+inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EINVAL;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
+ goto out;
+
+ error = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
+{
+ int error = -EFAULT;
+ struct mmap_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+
+ error = -EINVAL;
+ if (a.offset & ~PAGE_MASK)
+ goto out;
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
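+
+/*
+ * i.e. the legacy interface takes a byte offset, which must be
+ * page aligned: e.g. offset 0x3000 becomes pgoff 3 with 4K pages.
+ */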
+
+asmlinkage unsigned long
+sys_arm_mremap(unsigned long addr, unsigned long old_len,
+ unsigned long new_len, unsigned long flags,
+ unsigned long new_addr)
+{
+ unsigned long ret = -EINVAL;
+
+ if (flags & MREMAP_FIXED && new_addr < MIN_MAP_ADDR)
+ goto out;
+
+ down_write(&current->mm->mmap_sem);
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+ up_write(&current->mm->mmap_sem);
+
+out:
+ return ret;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls.
+ */
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set __user *inp, *outp, *exp;
+ struct timeval __user *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct __user *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ switch (call) {
+ case SEMOP:
+ return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
+ case SEMTIMEDOP:
+ return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
+ (const struct timespec __user *)fifth);
+
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
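+		/*
+		 * ptr points to a union semun in user space; reading its
+		 * first word through __pad recovers whichever member was
+		 * stored there.
+		 */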
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
+ return -EFAULT;
+		return sys_semctl(first, second, third, fourth);
+ }
+
+ case MSGSND:
+ return sys_msgsnd(first, (struct msgbuf __user *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
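+			/*
+			 * Old ABI: msgp and msgtyp arrive bundled in a
+			 * struct ipc_kludge because the classic sys_ipc()
+			 * could not pass them separately.
+			 */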
+ if (!ptr)
+ return -EINVAL;
+			if (copy_from_user(&tmp, (struct ipc_kludge __user *)ptr,
+					   sizeof(tmp)))
+ return -EFAULT;
+			return sys_msgrcv(first, tmp.msgp, second,
+					  tmp.msgtyp, third);
+ }
+		default:
+			return sys_msgrcv(first,
+					  (struct msgbuf __user *)ptr,
+					  second, fifth, third);
+		}
+	case MSGGET:
+		return sys_msgget((key_t)first, second);
+ case MSGCTL:
+ return sys_msgctl(first, second, (struct msqid_ds __user *)ptr);
+
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = do_shmat(first, (char __user *)ptr, second, &raddr);
+ if (ret)
+ return ret;
+ return put_user(raddr, (ulong __user *)third);
+ }
+ case 1: /* Of course, we don't support iBCS2! */
+ return -EINVAL;
+ }
+	case SHMDT:
+		return sys_shmdt((char __user *)ptr);
+	case SHMGET:
+		return sys_shmget(first, second, third);
+	case SHMCTL:
+		return sys_shmctl(first, second,
+				  (struct shmid_ds __user *)ptr);
+ default:
+ return -ENOSYS;
+ }
+}
+
+asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg,
+ unsigned long __user *addr)
+{
+ unsigned long ret;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &ret);
+ if (err == 0)
+ err = put_user(ret, addr);
+ return err;
+}
+
+/* Fork a new task - this duplicates the calling process.
+ * This is called indirectly via a small wrapper.
+ */
+asmlinkage int sys_fork(struct pt_regs *regs)
+{
+ return do_fork(SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
+}
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper.
+ */
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tidptr, int tls_val,
+ int __user *child_tidptr, struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->ARM_sp;
+
+ return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
+}
+
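+/*
+ * vfork: the child shares the parent's address space (CLONE_VM) and
+ * the parent is suspended until the child exits or execs (CLONE_VFORK).
+ */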
+asmlinkage int sys_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->ARM_sp, regs, 0, NULL, NULL);
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper.
+ */
+asmlinkage int sys_execve(char __user *filenamei, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
+{
+ int error;
+	char *filename;
+
+ filename = getname(filenamei);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
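+/*
+ * In-kernel execve(), for launching a user program from kernel
+ * context.  init/main.c uses it to start init, roughly:
+ *
+ *	argv_init[0] = init_filename;
+ *	execve(init_filename, argv_init, envp_init);
+ */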
+long execve(const char *filename, char **argv, char **envp)
+{
+ struct pt_regs regs;
+ int ret;
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+ ret = do_execve((char *)filename, (char __user * __user *)argv,
+ (char __user * __user *)envp, &regs);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Save argc to the register structure for userspace.
+ */
+ regs.ARM_r0 = ret;
+
+	/*
+	 * We were successful.  We won't be returning to our caller, but
+	 * instead to user space: the register frame is copied to the top
+	 * of the kernel stack, sp is pointed at it, and we enter
+	 * ret_to_user as if returning from a syscall.
+	 */
+ asm( "add r0, %0, %1\n\t"
+ "mov r1, %2\n\t"
+ "mov r2, %3\n\t"
+ "bl memmove\n\t" /* copy regs to top of stack */
+ "mov r8, #0\n\t" /* not a syscall */
+ "mov r9, %0\n\t" /* thread structure */
+ "mov sp, r0\n\t" /* reposition stack pointer */
+ "b ret_to_user"
+ :
+ : "r" (current_thread_info()),
+ "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+ : "r0", "r1", "r2", "r3", "ip", "memory");
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(execve);
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
new file mode 100644
index 00000000000..c232f24f4a6
--- /dev/null
+++ b/arch/arm/kernel/time.c
@@ -0,0 +1,402 @@
+/*
+ * linux/arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the ARM-specific time handling details:
+ * reading the RTC at bootup, etc...
+ *
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/profile.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/leds.h>
+#include <asm/thread_info.h>
+#include <asm/mach/time.h>
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+/*
+ * Our system timer.
+ */
+struct sys_timer *system_timer;
+
+extern unsigned long wall_jiffies;
+
+/* this needs a better home */
+DEFINE_SPINLOCK(rtc_lock);
+
+#ifdef CONFIG_SA1100_RTC_MODULE
+EXPORT_SYMBOL(rtc_lock);
+#endif
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+#ifdef CONFIG_SMP
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long fp, pc = instruction_pointer(regs);
+
+ if (in_lock_functions(pc)) {
+ fp = regs->ARM_fp;
+ pc = pc_pointer(((unsigned long *)fp)[-1]);
+ }
+
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+/*
+ * hook for setting the RTC's idea of the current time.
+ */
+int (*set_rtc)(void);
+
+static unsigned long dummy_gettimeoffset(void)
+{
+ return 0;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ * This is the default implementation. Sub-architecture
+ * implementations can override this.
+ */
+unsigned long long __attribute__((weak)) sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
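+
+/*
+ * With HZ = 100 the default sched_clock() above advances in 10 ms
+ * steps; platforms with a finer clock source should override it.
+ */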
+
+static unsigned long next_rtc_update;
+
+/*
+ * If we have an externally synchronized linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. set_rtc() has to be
+ * called as close as possible to 500 ms before the new second
+ * starts.
+ */
+static inline void do_set_rtc(void)
+{
+ if (time_status & STA_UNSYNC || set_rtc == NULL)
+ return;
+
+ if (next_rtc_update &&
+ time_before((unsigned long)xtime.tv_sec, next_rtc_update))
+ return;
+
+	/* bail out unless we are within half a tick of the 500 ms mark */
+	if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) ||
+	    xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
+		return;
+
+ if (set_rtc())
+ /*
+ * rtc update failed. Try again in 60s
+ */
+ next_rtc_update = xtime.tv_sec + 60;
+ else
+ next_rtc_update = xtime.tv_sec + 660;
+}
+
+#ifdef CONFIG_LEDS
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+ const char name[8];
+ int on;
+ int off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+ { "amber", led_amber_on, led_amber_off },
+ { "blue", led_blue_on, led_blue_off },
+ { "green", led_green_on, led_green_off },
+ { "red", led_red_on, led_red_off },
+};
+
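+/*
+ * Parse an LED command written to the "event" sysdev attribute.
+ * Accepted strings are "claim", "release", and "<colour> on" /
+ * "<colour> off" for each colour named above, e.g. "green on".
+ */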
+static ssize_t leds_store(struct sys_device *dev, const char *buf, size_t size)
+{
+ int ret = -EINVAL, len = strcspn(buf, " ");
+
+ if (len > 0 && buf[len] == '\0')
+ len--;
+
+ if (strncmp(buf, "claim", len) == 0) {
+ leds_event(led_claim);
+ ret = size;
+ } else if (strncmp(buf, "release", len) == 0) {
+ leds_event(led_release);
+ ret = size;
+ } else {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+ if (strlen(evt_names[i].name) != len ||
+ strncmp(buf, evt_names[i].name, len) != 0)
+ continue;
+ if (strncmp(buf+len, " on", 3) == 0) {
+ leds_event(evt_names[i].on);
+ ret = size;
+ } else if (strncmp(buf+len, " off", 4) == 0) {
+ leds_event(evt_names[i].off);
+ ret = size;
+ }
+ break;
+ }
+ }
+ return ret;
+}
+
+static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+
+static int leds_suspend(struct sys_device *dev, pm_message_t state)
+{
+ leds_event(led_stop);
+ return 0;
+}
+
+static int leds_resume(struct sys_device *dev)
+{
+ leds_event(led_start);
+ return 0;
+}
+
+static int leds_shutdown(struct sys_device *dev)
+{
+ leds_event(led_halted);
+ return 0;
+}
+
+static struct sysdev_class leds_sysclass = {
+ set_kset_name("leds"),
+ .shutdown = leds_shutdown,
+ .suspend = leds_suspend,
+ .resume = leds_resume,
+};
+
+static struct sys_device leds_device = {
+ .id = 0,
+ .cls = &leds_sysclass,
+};
+
+static int __init leds_init(void)
+{
+ int ret;
+ ret = sysdev_class_register(&leds_sysclass);
+ if (ret == 0)
+ ret = sysdev_register(&leds_device);
+ if (ret == 0)
+ ret = sysdev_create_file(&leds_device, &attr_event);
+ return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);
+#endif
+
+#ifdef CONFIG_LEDS_TIMER
+static inline void do_leds(void)
+{
+ static unsigned int count = 50;
+
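+	/* fire led_timer once every 50 ticks, i.e. twice a second at HZ=100 */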
+ if (--count == 0) {
+ count = 50;
+ leds_event(led_timer);
+ }
+}
+#else
+#define do_leds()
+#endif
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long seq;
+ unsigned long usec, sec, lost;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ usec = system_timer->offset();
+
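+		/*
+		 * xtime is only up to date as of wall_jiffies; fold in
+		 * any ticks that have happened since.
+		 */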
+ lost = jiffies - wall_jiffies;
+ if (lost)
+ usec += lost * USECS_PER_JIFFY;
+
+ sec = xtime.tv_sec;
+ usec += xtime.tv_nsec / 1000;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+ /* usec may have gone up a lot: be safe */
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * done, and then undo it!
+ */
+ nsec -= system_timer->offset() * NSEC_PER_USEC;
+ nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/**
+ * save_time_delta - Save the offset between system time and RTC time
+ * @delta: pointer to timespec to store delta
+ * @rtc: pointer to timespec for current RTC time
+ *
+ * Return a delta between the system time and the RTC time, such
+ * that system time can be restored later with restore_time_delta()
+ */
+void save_time_delta(struct timespec *delta, struct timespec *rtc)
+{
+ set_normalized_timespec(delta,
+ xtime.tv_sec - rtc->tv_sec,
+ xtime.tv_nsec - rtc->tv_nsec);
+}
+EXPORT_SYMBOL(save_time_delta);
+
+/**
+ * restore_time_delta - Restore the current system time
+ * @delta: delta returned by save_time_delta()
+ * @rtc: pointer to timespec for current RTC time
+ */
+void restore_time_delta(struct timespec *delta, struct timespec *rtc)
+{
+ struct timespec ts;
+
+ set_normalized_timespec(&ts,
+ delta->tv_sec + rtc->tv_sec,
+ delta->tv_nsec + rtc->tv_nsec);
+
+ do_settimeofday(&ts);
+}
+EXPORT_SYMBOL(restore_time_delta);
+
+/*
+ * Kernel system timer support: timer_tick() is called once per jiffy
+ * from the platform's timer interrupt handler to update the system
+ * time and run the usual per-tick work.
+ */
+void timer_tick(struct pt_regs *regs)
+{
+ profile_tick(CPU_PROFILING, regs);
+ do_leds();
+ do_set_rtc();
+ do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+}
+
+#ifdef CONFIG_PM
+static int timer_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct sys_timer *timer = container_of(dev, struct sys_timer, dev);
+
+ if (timer->suspend != NULL)
+ timer->suspend();
+
+ return 0;
+}
+
+static int timer_resume(struct sys_device *dev)
+{
+ struct sys_timer *timer = container_of(dev, struct sys_timer, dev);
+
+ if (timer->resume != NULL)
+ timer->resume();
+
+ return 0;
+}
+#else
+#define timer_suspend NULL
+#define timer_resume NULL
+#endif
+
+static struct sysdev_class timer_sysclass = {
+ set_kset_name("timer"),
+ .suspend = timer_suspend,
+ .resume = timer_resume,
+};
+
+static int __init timer_init_sysfs(void)
+{
+ int ret = sysdev_class_register(&timer_sysclass);
+ if (ret == 0) {
+ system_timer->dev.cls = &timer_sysclass;
+ ret = sysdev_register(&system_timer->dev);
+ }
+ return ret;
+}
+
+device_initcall(timer_init_sysfs);
+
+void __init time_init(void)
+{
+ if (system_timer->offset == NULL)
+ system_timer->offset = dummy_gettimeoffset;
+ system_timer->init();
+}
+
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
new file mode 100644
index 00000000000..93dc4646cd7
--- /dev/null
+++ b/arch/arm/kernel/traps.c
@@ -0,0 +1,590 @@
+/*
+ * linux/arch/arm/kernel/traps.c
+ *
+ * Copyright (C) 1995-2002 Russell King
+ * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 'traps.c' handles hardware exceptions after we have saved some state in
+ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
+ * kill the offending process.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/spinlock.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/traps.h>
+
+#include "ptrace.h"
+
+extern void c_backtrace(unsigned long fp, int pmode);
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+
+const char *processor_modes[]=
+{ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
+ "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
+ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
+ "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
+};
+
+static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+
+#ifdef CONFIG_DEBUG_USER
+unsigned int user_debug;
+
+static int __init user_debug_setup(char *str)
+{
+ get_option(&str, &user_debug);
+ return 1;
+}
+__setup("user_debug=", user_debug_setup);
+#endif
+
+void dump_backtrace_entry(unsigned long where, unsigned long from)
+{
+#ifdef CONFIG_KALLSYMS
+ printk("[<%08lx>] ", where);
+ print_symbol("(%s) ", where);
+ printk("from [<%08lx>] ", from);
+ print_symbol("(%s)\n", from);
+#else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+#endif
+}
+
+/*
+ * Stack pointers should always be within the kernel's view of
+ * physical memory.  If one is not, then we can't dump out any
+ * information relating to the stack.
+ */
+static int verify_stack(unsigned long sp)
+{
+ if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+	unsigned long p;
+ mm_segment_t fs;
+ int i;
+
+	/*
+	 * We need to switch to kernel mode so that we can use __get_user
+	 * to safely read from kernel space.
+	 */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top;) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ __get_user(val, (unsigned long *)p);
+ printk("%08x ", val);
+ }
+ }
+		printk("\n");
+ }
+
+ set_fs(fs);
+}
+
+static void dump_instr(struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ const int thumb = thumb_mode(regs);
+ const int width = thumb ? 4 : 8;
+ mm_segment_t fs;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ printk("Code: ");
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+ if (thumb)
+ bad = __get_user(val, &((u16 *)addr)[i]);
+ else
+ bad = __get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
+ else {
+ printk("bad PC value.");
+ break;
+ }
+ }
+ printk("\n");
+
+ set_fs(fs);
+}
+
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+ unsigned int fp;
+ int ok = 1;
+
+ printk("Backtrace: ");
+ fp = regs->ARM_fp;
+ if (!fp) {
+ printk("no frame pointer");
+ ok = 0;
+ } else if (verify_stack(fp)) {
+ printk("invalid frame pointer 0x%08x", fp);
+ ok = 0;
+ } else if (fp < (unsigned long)(tsk->thread_info + 1))
+ printk("frame pointer underflow");
+ printk("\n");
+
+ if (ok)
+ c_backtrace(fp, processor_mode(regs));
+}
+
+void dump_stack(void)
+{
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long fp;
+
+ if (!tsk)
+ tsk = current;
+
+ if (tsk != current)
+ fp = thread_saved_fp(tsk);
+ else
+ asm("mov%? %0, fp" : "=r" (fp));
+
+ c_backtrace(fp, 0x10);
+ barrier();
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+{
+ struct task_struct *tsk = current;
+ static int die_counter;
+
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
+ print_modules();
+ printk("CPU: %d\n", smp_processor_id());
+ show_regs(regs);
+ printk("Process %s (pid: %d, stack limit = 0x%p)\n",
+ tsk->comm, tsk->pid, tsk->thread_info + 1);
+
+ if (!user_mode(regs) || in_interrupt()) {
+ dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info);
+ dump_backtrace(regs, tsk);
+ dump_instr(regs);
+ }
+
+ bust_spinlocks(0);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+void die_if_kernel(const char *str, struct pt_regs *regs, int err)
+{
+ if (user_mode(regs))
+ return;
+
+ die(str, regs, err);
+}
+
+static void notify_die(const char *str, struct pt_regs *regs, siginfo_t *info,
+ unsigned long err, unsigned long trap)
+{
+ if (user_mode(regs)) {
+ current->thread.error_code = err;
+ current->thread.trap_no = trap;
+
+ force_sig_info(info->si_signo, info, current);
+ } else {
+ die(str, regs, err);
+ }
+}
+
+static LIST_HEAD(undef_hook);
+static DEFINE_SPINLOCK(undef_lock);
+
+void register_undef_hook(struct undef_hook *hook)
+{
+ spin_lock_irq(&undef_lock);
+ list_add(&hook->node, &undef_hook);
+ spin_unlock_irq(&undef_lock);
+}
+
+void unregister_undef_hook(struct undef_hook *hook)
+{
+ spin_lock_irq(&undef_lock);
+ list_del(&hook->node);
+ spin_unlock_irq(&undef_lock);
+}
+
+asmlinkage void do_undefinstr(struct pt_regs *regs)
+{
+ unsigned int correction = thumb_mode(regs) ? 2 : 4;
+ unsigned int instr;
+ struct undef_hook *hook;
+ siginfo_t info;
+ void __user *pc;
+
+	/*
+	 * According to the ARM ARM, the PC is 2 or 4 bytes ahead of the
+	 * faulting instruction, depending on whether we're in Thumb mode
+	 * or not.  Correct this offset.
+	 */
+ regs->ARM_pc -= correction;
+
+ pc = (void __user *)instruction_pointer(regs);
+ if (thumb_mode(regs)) {
+ get_user(instr, (u16 __user *)pc);
+ } else {
+ get_user(instr, (u32 __user *)pc);
+ }
+
+ spin_lock_irq(&undef_lock);
+ list_for_each_entry(hook, &undef_hook, node) {
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
+ if (hook->fn(regs, instr) == 0) {
+ spin_unlock_irq(&undef_lock);
+ return;
+ }
+ }
+ }
+ spin_unlock_irq(&undef_lock);
+
+#ifdef CONFIG_DEBUG_USER
+ if (user_debug & UDBG_UNDEFINED) {
+ printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
+ current->comm, current->pid, pc);
+ dump_instr(regs);
+ }
+#endif
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = pc;
+
+ notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+}
+
+asmlinkage void do_unexp_fiq(struct pt_regs *regs)
+{
+#ifndef CONFIG_IGNORE_FIQ
+ printk("Hmm. Unexpected FIQ received, but trying to continue\n");
+ printk("You may have a hardware problem...\n");
+#endif
+}
+
+/*
+ * bad_mode handles the impossible case in the vectors. If you see one of
+ * these, then it's extremely serious, and could mean you have buggy hardware.
+ * It never returns, and never tries to sync. We hope that we can at least
+ * dump out some state information...
+ */
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
+{
+ console_verbose();
+
+ printk(KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
+ handler[reason], processor_modes[proc_mode]);
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+static int bad_syscall(int n, struct pt_regs *regs)
+{
+ struct thread_info *thread = current_thread_info();
+ siginfo_t info;
+
+ if (current->personality != PER_LINUX && thread->exec_domain->handler) {
+ thread->exec_domain->handler(n, regs);
+ return regs->ARM_r0;
+ }
+
+#ifdef CONFIG_DEBUG_USER
+ if (user_debug & UDBG_SYSCALL) {
+ printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
+ current->pid, current->comm, n);
+ dump_instr(regs);
+ }
+#endif
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLTRP;
+ info.si_addr = (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4);
+
+ notify_die("Oops - bad syscall", regs, &info, n, 0);
+
+ return regs->ARM_r0;
+}
+
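+/*
+ * Flush the user address range [start, end) through the first VMA
+ * that overlaps it.  A backwards range or a non-zero flags word
+ * (reserved for future use) turns this into a no-op.
+ */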
+static inline void
+do_cache_op(unsigned long start, unsigned long end, int flags)
+{
+ struct vm_area_struct *vma;
+
+ if (end < start || flags)
+ return;
+
+ vma = find_vma(current->active_mm, start);
+ if (vma && vma->vm_start < end) {
+ if (start < vma->vm_start)
+ start = vma->vm_start;
+ if (end > vma->vm_end)
+ end = vma->vm_end;
+
+ flush_cache_user_range(vma, start, end);
+ }
+}
+
+/*
+ * Handle all unrecognised system calls.
+ * 0x9f0000 - 0x9fffff are some more esoteric system calls
+ */
+#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
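+/* e.g. NR(breakpoint) reduces __ARM_NR_breakpoint to the small index tested below */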
+asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+{
+ struct thread_info *thread = current_thread_info();
+ siginfo_t info;
+
+ if ((no >> 16) != 0x9f)
+ return bad_syscall(no, regs);
+
+ switch (no & 0xffff) {
+ case 0: /* branch through 0 */
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = NULL;
+
+ notify_die("branch through zero", regs, &info, 0, 0);
+ return 0;
+
+ case NR(breakpoint): /* SWI BREAK_POINT */
+ regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
+ ptrace_break(current, regs);
+ return regs->ARM_r0;
+
+ /*
+ * Flush a region from virtual address 'r0' to virtual address 'r1'
+ * _exclusive_. There is no alignment requirement on either address;
+ * user space does not need to know the hardware cache layout.
+ *
+ * r2 contains flags. It should ALWAYS be passed as ZERO until it
+ * is defined to be something else. For now we ignore it, but may
+ * the fires of hell burn in your belly if you break this rule. ;)
+ *
+ * (at a later date, we may want to allow this call to not flush
+ * various aspects of the cache. Passing '0' will guarantee that
+ * everything necessary gets flushed to maintain consistency in
+ * the specified region).
+ */
+ case NR(cacheflush):
+ do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
+ return 0;
+
+ case NR(usr26):
+ if (!(elf_hwcap & HWCAP_26BIT))
+ break;
+ regs->ARM_cpsr &= ~MODE32_BIT;
+ return regs->ARM_r0;
+
+ case NR(usr32):
+ if (!(elf_hwcap & HWCAP_26BIT))
+ break;
+ regs->ARM_cpsr |= MODE32_BIT;
+ return regs->ARM_r0;
+
+ case NR(set_tls):
+ thread->tp_value = regs->ARM_r0;
+ /*
+ * Our user accessible TLS ptr is located at 0xffff0ffc.
+	 * On SMP, read access to this address must raise a fault
+	 * and be emulated from the data abort handler.
+ */
+ *((unsigned long *)0xffff0ffc) = thread->tp_value;
+ return 0;
+
+ default:
+ /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
+ if not implemented, rather than raising SIGILL. This
+ way the calling program can gracefully determine whether
+ a feature is supported. */
+ if (no <= 0x7ff)
+ return -ENOSYS;
+ break;
+ }
+#ifdef CONFIG_DEBUG_USER
+	/*
+	 * Experience shows that these calls usually indicate that
+	 * something catastrophic has happened.
+	 */
+ if (user_debug & UDBG_SYSCALL) {
+ printk("[%d] %s: arm syscall %d\n",
+ current->pid, current->comm, no);
+ dump_instr(regs);
+ if (user_mode(regs)) {
+ show_regs(regs);
+ c_backtrace(regs->ARM_fp, processor_mode(regs));
+ }
+ }
+#endif
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLTRP;
+ info.si_addr = (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4);
+
+ notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
+ return 0;
+}
+
+void __bad_xchg(volatile void *ptr, int size)
+{
+ printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
+ __builtin_return_address(0), ptr, size);
+ BUG();
+}
+EXPORT_SYMBOL(__bad_xchg);
+
+/*
+ * A data abort trap was taken, but we did not handle the instruction.
+ * Try to abort the user program, or panic if it was the kernel.
+ */
+asmlinkage void
+baddataabort(int code, unsigned long instr, struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ siginfo_t info;
+
+#ifdef CONFIG_DEBUG_USER
+ if (user_debug & UDBG_BADABORT) {
+ printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
+ current->pid, current->comm, code, instr);
+ dump_instr(regs);
+ show_pte(current->mm, addr);
+ }
+#endif
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void __user *)addr;
+
+ notify_die("unknown data abort code", regs, &info, instr, 0);
+}
+
+volatile void __bug(const char *file, int line, void *data)
+{
+ printk(KERN_CRIT"kernel BUG at %s:%d!", file, line);
+ if (data)
+ printk(" - extra data = %p", data);
+ printk("\n");
+ *(int *)0 = 0;
+}
+EXPORT_SYMBOL(__bug);
+
+void __readwrite_bug(const char *fn)
+{
+ printk("%s called, but not implemented\n", fn);
+ BUG();
+}
+EXPORT_SYMBOL(__readwrite_bug);
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+ printk("%s:%d: bad pte %08lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+ printk("%s:%d: bad pmd %08lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+ printk("%s:%d: bad pgd %08lx.\n", file, line, val);
+}
+
+asmlinkage void __div0(void)
+{
+ printk("Division by zero in kernel.\n");
+ dump_stack();
+}
+EXPORT_SYMBOL(__div0);
+
+void abort(void)
+{
+ BUG();
+
+ /* if that doesn't kill us, halt */
+ panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
+void __init trap_init(void)
+{
+ extern void __trap_init(void);
+
+ __trap_init();
+ flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
+ modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..a39c6a42d68
--- /dev/null
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -0,0 +1,166 @@
+/* ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <linux/config.h>
+
+OUTPUT_ARCH(arm)
+ENTRY(stext)
+#ifndef __ARMEB__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
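+/*
+ * "jiffies" aliases the least significant 32 bits of jiffies_64:
+ * offset 0 on little-endian, offset 4 on big-endian (__ARMEB__).
+ */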
+SECTIONS
+{
+ . = TEXTADDR;
+ .init : { /* Init code and data */
+ _stext = .;
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ __proc_info_begin = .;
+ *(.proc.info)
+ __proc_info_end = .;
+ __arch_info_begin = .;
+ *(.arch.info)
+ __arch_info_end = .;
+ __tagtable_begin = .;
+ *(.taglist)
+ __tagtable_end = .;
+ . = ALIGN(16);
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ __early_begin = .;
+ *(__early_param)
+ __early_end = .;
+ __initcall_start = .;
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ __initcall_end = .;
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ __security_initcall_start = .;
+ *(.security_initcall.init)
+ __security_initcall_end = .;
+ . = ALIGN(32);
+ __initramfs_start = .;
+ usr/built-in.o(.init.ramfs)
+ __initramfs_end = .;
+ . = ALIGN(64);
+ __per_cpu_start = .;
+ *(.data.percpu)
+ __per_cpu_end = .;
+#ifndef CONFIG_XIP_KERNEL
+ __init_begin = _stext;
+ *(.init.data)
+ . = ALIGN(4096);
+ __init_end = .;
+#endif
+ }
+
+ /DISCARD/ : { /* Exit code and data */
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
+
+ .text : { /* Real text segment */
+ _text = .; /* Text and read-only data */
+ *(.text)
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ *(.rodata)
+ *(.rodata.*)
+ *(.glue_7)
+ *(.glue_7t)
+ *(.got) /* Global offset table */
+ }
+
+ . = ALIGN(16);
+ __ex_table : { /* Exception table */
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
+ RODATA
+
+ _etext = .; /* End of text and rodata section */
+
+#ifdef CONFIG_XIP_KERNEL
+ __data_loc = ALIGN(4); /* location in binary */
+ . = DATAADDR;
+#else
+ . = ALIGN(8192);
+ __data_loc = .;
+#endif
+
+ .data : AT(__data_loc) {
+ __data_start = .; /* address in memory */
+
+ /*
+ * first, the init task union, aligned
+ * to an 8192 byte boundary.
+ */
+ *(.init.task)
+
+#ifdef CONFIG_XIP_KERNEL
+ . = ALIGN(4096);
+ __init_begin = .;
+ *(.init.data)
+ . = ALIGN(4096);
+ __init_end = .;
+#endif
+
+ . = ALIGN(4096);
+ __nosave_begin = .;
+ *(.data.nosave)
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+ /*
+ * then the cacheline aligned data
+ */
+ . = ALIGN(32);
+ *(.data.cacheline_aligned)
+
+ /*
+ * and the usual data section
+ */
+ *(.data)
+ CONSTRUCTORS
+
+ _edata = .;
+ }
+
+ .bss : {
+ __bss_start = .; /* BSS */
+ *(.bss)
+ *(COMMON)
+ _end = .;
+ }
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
+
+/* these must never be empty */
+ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")