Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/Makefile       |    7
-rw-r--r-- | kernel/irq/autoprobe.c    |  196
-rw-r--r-- | kernel/irq/chip.c         |  760
-rw-r--r-- | kernel/irq/devres.c       |   94
-rw-r--r-- | kernel/irq/handle.c       |  561
-rw-r--r-- | kernel/irq/internals.h    |   99
-rw-r--r-- | kernel/irq/manage.c       | 1118
-rw-r--r-- | kernel/irq/migration.c    |   68
-rw-r--r-- | kernel/irq/numa_migrate.c |  119
-rw-r--r-- | kernel/irq/pm.c           |   79
-rw-r--r-- | kernel/irq/proc.c         |  280
-rw-r--r-- | kernel/irq/resend.c       |   81
-rw-r--r-- | kernel/irq/spurious.c     |  301
13 files changed, 3763 insertions, 0 deletions
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile new file mode 100644 index 00000000000..7d047808419 --- /dev/null +++ b/kernel/irq/Makefile @@ -0,0 +1,7 @@ + +obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o +obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o +obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o +obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o +obj-$(CONFIG_PM_SLEEP) += pm.o diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c new file mode 100644 index 00000000000..2295a31ef11 --- /dev/null +++ b/kernel/irq/autoprobe.c @@ -0,0 +1,196 @@ +/* + * linux/kernel/irq/autoprobe.c + * + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar + * + * This file contains the interrupt probing code and driver APIs. + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/async.h> + +#include "internals.h" + +/* + * Autodetection depends on the fact that any interrupt that + * comes in on to an unassigned handler will get stuck with + * "IRQ_WAITING" cleared and the interrupt disabled. + */ +static DEFINE_MUTEX(probing_active); + +/** + * probe_irq_on - begin an interrupt autodetect + * + * Commence probing for an interrupt. The interrupts are scanned + * and a mask of potential interrupt lines is returned. + * + */ +unsigned long probe_irq_on(void) +{ + struct irq_desc *desc; + unsigned long mask = 0; + unsigned int status; + int i; + + /* + * quiesce the kernel, or at least the asynchronous portion + */ + async_synchronize_full(); + mutex_lock(&probing_active); + /* + * something may have generated an irq long ago and we want to + * flush such a longstanding irq before considering it as spurious. + */ + for_each_irq_desc_reverse(i, desc) { + raw_spin_lock_irq(&desc->lock); + if (!desc->action && !(desc->status & IRQ_NOPROBE)) { + /* + * An old-style architecture might still have + * the handle_bad_irq handler there: + */ + compat_irq_chip_set_default_handler(desc); + + /* + * Some chips need to know about probing in + * progress: + */ + if (desc->chip->set_type) + desc->chip->set_type(i, IRQ_TYPE_PROBE); + desc->chip->startup(i); + } + raw_spin_unlock_irq(&desc->lock); + } + + /* Wait for longstanding interrupts to trigger. */ + msleep(20); + + /* + * enable any unassigned irqs + * (we must startup again here because if a longstanding irq + * happened in the previous stage, it may have masked itself) + */ + for_each_irq_desc_reverse(i, desc) { + raw_spin_lock_irq(&desc->lock); + if (!desc->action && !(desc->status & IRQ_NOPROBE)) { + desc->status |= IRQ_AUTODETECT | IRQ_WAITING; + if (desc->chip->startup(i)) + desc->status |= IRQ_PENDING; + } + raw_spin_unlock_irq(&desc->lock); + } + + /* + * Wait for spurious interrupts to trigger + */ + msleep(100); + + /* + * Now filter out any obviously spurious interrupts + */ + for_each_irq_desc(i, desc) { + raw_spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + /* It triggered already - consider it spurious. */ + if (!(status & IRQ_WAITING)) { + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } else + if (i < 32) + mask |= 1 << i; + } + raw_spin_unlock_irq(&desc->lock); + } + + return mask; +} +EXPORT_SYMBOL(probe_irq_on); + +/** + * probe_irq_mask - scan a bitmap of interrupt lines + * @val: mask of interrupts to consider + * + * Scan the interrupt lines and return a bitmap of active + * autodetect interrupts. 
The interrupt probe logic state + * is then returned to its previous value. + * + * Note: we need to scan all the irq's even though we will + * only return autodetect irq numbers - just so that we reset + * them all to a known state. + */ +unsigned int probe_irq_mask(unsigned long val) +{ + unsigned int status, mask = 0; + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + raw_spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + if (i < 16 && !(status & IRQ_WAITING)) + mask |= 1 << i; + + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } + raw_spin_unlock_irq(&desc->lock); + } + mutex_unlock(&probing_active); + + return mask & val; +} +EXPORT_SYMBOL(probe_irq_mask); + +/** + * probe_irq_off - end an interrupt autodetect + * @val: mask of potential interrupts (unused) + * + * Scans the unused interrupt lines and returns the line which + * appears to have triggered the interrupt. If no interrupt was + * found then zero is returned. If more than one interrupt is + * found then minus the first candidate is returned to indicate + * their is doubt. + * + * The interrupt probe logic state is returned to its previous + * value. + * + * BUGS: When used in a module (which arguably shouldn't happen) + * nothing prevents two IRQ probe callers from overlapping. The + * results of this are non-optimal. + */ +int probe_irq_off(unsigned long val) +{ + int i, irq_found = 0, nr_of_irqs = 0; + struct irq_desc *desc; + unsigned int status; + + for_each_irq_desc(i, desc) { + raw_spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + if (!(status & IRQ_WAITING)) { + if (!nr_of_irqs) + irq_found = i; + nr_of_irqs++; + } + desc->status = status & ~IRQ_AUTODETECT; + desc->chip->shutdown(i); + } + raw_spin_unlock_irq(&desc->lock); + } + mutex_unlock(&probing_active); + + if (nr_of_irqs > 1) + irq_found = -irq_found; + + return irq_found; +} +EXPORT_SYMBOL(probe_irq_off); + diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c new file mode 100644 index 00000000000..71eba24a39a --- /dev/null +++ b/kernel/irq/chip.c @@ -0,0 +1,760 @@ +/* + * linux/kernel/irq/chip.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the core interrupt handling code, for irq-chip + * based architectures. 
+ * + * Detailed information is available in Documentation/DocBook/genericirq + */ + +#include <linux/irq.h> +#include <linux/msi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> + +#include "internals.h" + +static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) +{ + struct irq_desc *desc; + unsigned long flags; + + desc = irq_to_desc(irq); + if (!desc) { + WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); + return; + } + + /* Ensure we don't have left over values from a previous use of this irq */ + raw_spin_lock_irqsave(&desc->lock, flags); + desc->status = IRQ_DISABLED; + desc->chip = &no_irq_chip; + desc->handle_irq = handle_bad_irq; + desc->depth = 1; + desc->msi_desc = NULL; + desc->handler_data = NULL; + if (!keep_chip_data) + desc->chip_data = NULL; + desc->action = NULL; + desc->irq_count = 0; + desc->irqs_unhandled = 0; +#ifdef CONFIG_SMP + cpumask_setall(desc->affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_clear(desc->pending_mask); +#endif +#endif + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +/** + * dynamic_irq_init - initialize a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_init(unsigned int irq) +{ + dynamic_irq_init_x(irq, false); +} + +/** + * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq + * @irq: irq number to initialize + * + * does not set irq_to_desc(irq)->chip_data to NULL + */ +void dynamic_irq_init_keep_chip_data(unsigned int irq) +{ + dynamic_irq_init_x(irq, true); +} + +static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); + return; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + if (desc->action) { + raw_spin_unlock_irqrestore(&desc->lock, flags); + WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", + irq); + return; + } + desc->msi_desc = NULL; + desc->handler_data = NULL; + if (!keep_chip_data) + desc->chip_data = NULL; + desc->handle_irq = handle_bad_irq; + desc->chip = &no_irq_chip; + desc->name = NULL; + clear_kstat_irqs(desc); + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +/** + * dynamic_irq_cleanup - cleanup a dynamically allocated irq + * @irq: irq number to initialize + */ +void dynamic_irq_cleanup(unsigned int irq) +{ + dynamic_irq_cleanup_x(irq, false); +} + +/** + * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq + * @irq: irq number to initialize + * + * does not set irq_to_desc(irq)->chip_data to NULL + */ +void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) +{ + dynamic_irq_cleanup_x(irq, true); +} + + +/** + * set_irq_chip - set the irq chip for an irq + * @irq: irq number + * @chip: pointer to irq chip description structure + */ +int set_irq_chip(unsigned int irq, struct irq_chip *chip) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); + return -EINVAL; + } + + if (!chip) + chip = &no_irq_chip; + + raw_spin_lock_irqsave(&desc->lock, flags); + irq_chip_set_defaults(chip); + desc->chip = chip; + raw_spin_unlock_irqrestore(&desc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(set_irq_chip); + +/** + * set_irq_type - set the irq trigger type for an irq + * @irq: irq number + * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h + */ +int set_irq_type(unsigned int 
irq, unsigned int type) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret = -ENXIO; + + if (!desc) { + printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); + return -ENODEV; + } + + type &= IRQ_TYPE_SENSE_MASK; + if (type == IRQ_TYPE_NONE) + return 0; + + raw_spin_lock_irqsave(&desc->lock, flags); + ret = __irq_set_trigger(desc, irq, type); + raw_spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} +EXPORT_SYMBOL(set_irq_type); + +/** + * set_irq_data - set irq type data for an irq + * @irq: Interrupt number + * @data: Pointer to interrupt specific data + * + * Set the hardware irq controller data for an irq + */ +int set_irq_data(unsigned int irq, void *data) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install controller data for IRQ%d\n", irq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + desc->handler_data = data; + raw_spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} +EXPORT_SYMBOL(set_irq_data); + +/** + * set_irq_msi - set MSI descriptor data for an irq + * @irq: Interrupt number + * @entry: Pointer to MSI descriptor data + * + * Set the MSI descriptor entry for an irq + */ +int set_irq_msi(unsigned int irq, struct msi_desc *entry) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install msi data for IRQ%d\n", irq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + desc->msi_desc = entry; + if (entry) + entry->irq = irq; + raw_spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} + +/** + * set_irq_chip_data - set irq chip data for an irq + * @irq: Interrupt number + * @data: Pointer to chip specific data + * + * Set the hardware irq chip data for an irq + */ +int set_irq_chip_data(unsigned int irq, void *data) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install chip data for IRQ%d\n", irq); + return -EINVAL; + } + + if (!desc->chip) { + printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); + return -EINVAL; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + desc->chip_data = data; + raw_spin_unlock_irqrestore(&desc->lock, flags); + + return 0; +} +EXPORT_SYMBOL(set_irq_chip_data); + +/** + * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq + * + * @irq: Interrupt number + * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag + * + * The IRQ_NESTED_THREAD flag indicates that on + * request_threaded_irq() no separate interrupt thread should be + * created for the irq as the handler are called nested in the + * context of a demultiplexing interrupt handler thread. 
+ */ +void set_irq_nested_thread(unsigned int irq, int nest) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) + return; + + raw_spin_lock_irqsave(&desc->lock, flags); + if (nest) + desc->status |= IRQ_NESTED_THREAD; + else + desc->status &= ~IRQ_NESTED_THREAD; + raw_spin_unlock_irqrestore(&desc->lock, flags); +} +EXPORT_SYMBOL_GPL(set_irq_nested_thread); + +/* + * default enable function + */ +static void default_enable(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->unmask(irq); + desc->status &= ~IRQ_MASKED; +} + +/* + * default disable function + */ +static void default_disable(unsigned int irq) +{ +} + +/* + * default startup function + */ +static unsigned int default_startup(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->enable(irq); + return 0; +} + +/* + * default shutdown function + */ +static void default_shutdown(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + desc->chip->mask(irq); + desc->status |= IRQ_MASKED; +} + +/* + * Fixup enable/disable function pointers + */ +void irq_chip_set_defaults(struct irq_chip *chip) +{ + if (!chip->enable) + chip->enable = default_enable; + if (!chip->disable) + chip->disable = default_disable; + if (!chip->startup) + chip->startup = default_startup; + /* + * We use chip->disable, when the user provided its own. When + * we have default_disable set for chip->disable, then we need + * to use default_shutdown, otherwise the irq line is not + * disabled on free_irq(): + */ + if (!chip->shutdown) + chip->shutdown = chip->disable != default_disable ? + chip->disable : default_shutdown; + if (!chip->name) + chip->name = chip->typename; + if (!chip->end) + chip->end = dummy_irq_chip.end; +} + +static inline void mask_ack_irq(struct irq_desc *desc, int irq) +{ + if (desc->chip->mask_ack) + desc->chip->mask_ack(irq); + else { + desc->chip->mask(irq); + if (desc->chip->ack) + desc->chip->ack(irq); + } + desc->status |= IRQ_MASKED; +} + +static inline void mask_irq(struct irq_desc *desc, int irq) +{ + if (desc->chip->mask) { + desc->chip->mask(irq); + desc->status |= IRQ_MASKED; + } +} + +static inline void unmask_irq(struct irq_desc *desc, int irq) +{ + if (desc->chip->unmask) { + desc->chip->unmask(irq); + desc->status &= ~IRQ_MASKED; + } +} + +/* + * handle_nested_irq - Handle a nested irq from a irq thread + * @irq: the interrupt number + * + * Handle interrupts which are nested into a threaded interrupt + * handler. The handler function is called inside the calling + * threads context. + */ +void handle_nested_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + irqreturn_t action_ret; + + might_sleep(); + + raw_spin_lock_irq(&desc->lock); + + kstat_incr_irqs_this_cpu(irq, desc); + + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; + + desc->status |= IRQ_INPROGRESS; + raw_spin_unlock_irq(&desc->lock); + + action_ret = action->thread_fn(action->irq, action->dev_id); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + raw_spin_lock_irq(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; + +out_unlock: + raw_spin_unlock_irq(&desc->lock); +} +EXPORT_SYMBOL_GPL(handle_nested_irq); + +/** + * handle_simple_irq - Simple and software-decoded IRQs. 
+ * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Simple interrupts are either sent from a demultiplexing interrupt + * handler or come from hardware, where no interrupt hardware control + * is necessary. + * + * Note: The caller is expected to handle the ack, clear, mask and + * unmask issues if necessary. + */ +void +handle_simple_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + raw_spin_lock(&desc->lock); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out_unlock; + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; + + desc->status |= IRQ_INPROGRESS; + raw_spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + raw_spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; +out_unlock: + raw_spin_unlock(&desc->lock); +} + +/** + * handle_level_irq - Level type irq handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Level type interrupts are active as long as the hardware line has + * the active level. This may require to mask the interrupt and unmask + * it after the associated handler has acknowledged the device, so the + * interrupt line is back to inactive. + */ +void +handle_level_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + raw_spin_lock(&desc->lock); + mask_ack_irq(desc, irq); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out_unlock; + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + /* + * If its disabled or no action available + * keep it masked and get out of here + */ + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; + + desc->status |= IRQ_INPROGRESS; + raw_spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + raw_spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; + + if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) + unmask_irq(desc, irq); +out_unlock: + raw_spin_unlock(&desc->lock); +} +EXPORT_SYMBOL_GPL(handle_level_irq); + +/** + * handle_fasteoi_irq - irq handler for transparent controllers + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Only a single callback will be issued to the chip: an ->eoi() + * call when the interrupt has been serviced. This enables support + * for modern forms of interrupt handlers, which handle the flow + * details in hardware, transparently. 
+ */ +void +handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) +{ + struct irqaction *action; + irqreturn_t action_ret; + + raw_spin_lock(&desc->lock); + + if (unlikely(desc->status & IRQ_INPROGRESS)) + goto out; + + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + kstat_incr_irqs_this_cpu(irq, desc); + + /* + * If its disabled or no action available + * then mask it and get out of here: + */ + action = desc->action; + if (unlikely(!action || (desc->status & IRQ_DISABLED))) { + desc->status |= IRQ_PENDING; + mask_irq(desc, irq); + goto out; + } + + desc->status |= IRQ_INPROGRESS; + desc->status &= ~IRQ_PENDING; + raw_spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + raw_spin_lock(&desc->lock); + desc->status &= ~IRQ_INPROGRESS; +out: + desc->chip->eoi(irq); + + raw_spin_unlock(&desc->lock); +} + +/** + * handle_edge_irq - edge type IRQ handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Interrupt occures on the falling and/or rising edge of a hardware + * signal. The occurence is latched into the irq controller hardware + * and must be acked in order to be reenabled. After the ack another + * interrupt can happen on the same source even before the first one + * is handled by the assosiacted event handler. If this happens it + * might be necessary to disable (mask) the interrupt depending on the + * controller hardware. This requires to reenable the interrupt inside + * of the loop which handles the interrupts which have arrived while + * the handler was running. If all pending interrupts are handled, the + * loop is left. + */ +void +handle_edge_irq(unsigned int irq, struct irq_desc *desc) +{ + raw_spin_lock(&desc->lock); + + desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); + + /* + * If we're currently running this IRQ, or its disabled, + * we shouldn't process the IRQ. Mark it pending, handle + * the necessary masking and go out + */ + if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || + !desc->action)) { + desc->status |= (IRQ_PENDING | IRQ_MASKED); + mask_ack_irq(desc, irq); + goto out_unlock; + } + kstat_incr_irqs_this_cpu(irq, desc); + + /* Start handling the irq */ + if (desc->chip->ack) + desc->chip->ack(irq); + + /* Mark the IRQ currently in progress.*/ + desc->status |= IRQ_INPROGRESS; + + do { + struct irqaction *action = desc->action; + irqreturn_t action_ret; + + if (unlikely(!action)) { + mask_irq(desc, irq); + goto out_unlock; + } + + /* + * When another irq arrived while we were handling + * one, we could have masked the irq. + * Renable it, if it was not disabled in meantime. 
+ */ + if (unlikely((desc->status & + (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == + (IRQ_PENDING | IRQ_MASKED))) { + unmask_irq(desc, irq); + } + + desc->status &= ~IRQ_PENDING; + raw_spin_unlock(&desc->lock); + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + raw_spin_lock(&desc->lock); + + } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); + + desc->status &= ~IRQ_INPROGRESS; +out_unlock: + raw_spin_unlock(&desc->lock); +} + +/** + * handle_percpu_irq - Per CPU local irq handler + * @irq: the interrupt number + * @desc: the interrupt description structure for this irq + * + * Per CPU interrupts on SMP machines without locking requirements + */ +void +handle_percpu_irq(unsigned int irq, struct irq_desc *desc) +{ + irqreturn_t action_ret; + + kstat_incr_irqs_this_cpu(irq, desc); + + if (desc->chip->ack) + desc->chip->ack(irq); + + action_ret = handle_IRQ_event(irq, desc->action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + if (desc->chip->eoi) + desc->chip->eoi(irq); +} + +void +__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + const char *name) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR + "Trying to install type control for IRQ%d\n", irq); + return; + } + + if (!handle) + handle = handle_bad_irq; + else if (desc->chip == &no_irq_chip) { + printk(KERN_WARNING "Trying to install %sinterrupt handler " + "for IRQ%d\n", is_chained ? "chained " : "", irq); + /* + * Some ARM implementations install a handler for really dumb + * interrupt hardware without setting an irq_chip. This worked + * with the ARM no_irq_chip but the check in setup_irq would + * prevent us to setup the interrupt at all. Switch it to + * dummy_irq_chip for easy transition. + */ + desc->chip = &dummy_irq_chip; + } + + chip_bus_lock(irq, desc); + raw_spin_lock_irqsave(&desc->lock, flags); + + /* Uninstall? 
*/ + if (handle == handle_bad_irq) { + if (desc->chip != &no_irq_chip) + mask_ack_irq(desc, irq); + desc->status |= IRQ_DISABLED; + desc->depth = 1; + } + desc->handle_irq = handle; + desc->name = name; + + if (handle != handle_bad_irq && is_chained) { + desc->status &= ~IRQ_DISABLED; + desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; + desc->depth = 0; + desc->chip->startup(irq); + } + raw_spin_unlock_irqrestore(&desc->lock, flags); + chip_bus_sync_unlock(irq, desc); +} +EXPORT_SYMBOL_GPL(__set_irq_handler); + +void +set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle) +{ + set_irq_chip(irq, chip); + __set_irq_handler(irq, handle, 0, NULL); +} + +void +set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, + irq_flow_handler_t handle, const char *name) +{ + set_irq_chip(irq, chip); + __set_irq_handler(irq, handle, 0, name); +} + +void __init set_irq_noprobe(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); + return; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + desc->status |= IRQ_NOPROBE; + raw_spin_unlock_irqrestore(&desc->lock, flags); +} + +void __init set_irq_probe(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) { + printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); + return; + } + + raw_spin_lock_irqsave(&desc->lock, flags); + desc->status &= ~IRQ_NOPROBE; + raw_spin_unlock_irqrestore(&desc->lock, flags); +} diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c new file mode 100644 index 00000000000..d06df9c41cb --- /dev/null +++ b/kernel/irq/devres.c @@ -0,0 +1,94 @@ +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/gfp.h> + +/* + * Device resource management aware IRQ request/free implementation. + */ +struct irq_devres { + unsigned int irq; + void *dev_id; +}; + +static void devm_irq_release(struct device *dev, void *res) +{ + struct irq_devres *this = res; + + free_irq(this->irq, this->dev_id); +} + +static int devm_irq_match(struct device *dev, void *res, void *data) +{ + struct irq_devres *this = res, *match = data; + + return this->irq == match->irq && this->dev_id == match->dev_id; +} + +/** + * devm_request_threaded_irq - allocate an interrupt line for a managed device + * @dev: device to request interrupt for + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @thread_fn: function to be called in a threaded interrupt context. NULL + * for devices which handle everything in @handler + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as + * request_irq(). IRQs requested with this function will be + * automatically freed on driver detach. + * + * If an IRQ allocated with this function needs to be freed + * separately, dev_free_irq() must be used. 
+ */ +int devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id) +{ + struct irq_devres *dr; + int rc; + + dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres), + GFP_KERNEL); + if (!dr) + return -ENOMEM; + + rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname, + dev_id); + if (rc) { + devres_free(dr); + return rc; + } + + dr->irq = irq; + dr->dev_id = dev_id; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL(devm_request_threaded_irq); + +/** + * devm_free_irq - free an interrupt + * @dev: device to free interrupt for + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Except for the extra @dev argument, this function takes the + * same arguments and performs the same function as free_irq(). + * This function instead of free_irq() should be used to manually + * free IRQs allocated with dev_request_irq(). + */ +void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) +{ + struct irq_devres match_data = { irq, dev_id }; + + free_irq(irq, dev_id); + WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, + &match_data)); +} +EXPORT_SYMBOL(devm_free_irq); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c new file mode 100644 index 00000000000..814940e7f48 --- /dev/null +++ b/kernel/irq/handle.c @@ -0,0 +1,561 @@ +/* + * linux/kernel/irq/handle.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King + * + * This file contains the core interrupt handling code. + * + * Detailed information is available in Documentation/DocBook/genericirq + * + */ + +#include <linux/irq.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <linux/rculist.h> +#include <linux/hash.h> +#include <linux/bootmem.h> +#include <trace/events/irq.h> + +#include "internals.h" + +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +struct lock_class_key irq_desc_lock_class; + +/** + * handle_bad_irq - handle spurious and unhandled irqs + * @irq: the interrupt number + * @desc: description of the interrupt + * + * Handles spurious and unhandled IRQ's. It also prints a debugmessage. + */ +void handle_bad_irq(unsigned int irq, struct irq_desc *desc) +{ + print_irq_desc(irq, desc); + kstat_incr_irqs_this_cpu(irq, desc); + ack_bad_irq(irq); +} + +#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) +static void __init init_irq_default_affinity(void) +{ + alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); + cpumask_setall(irq_default_affinity); +} +#else +static void __init init_irq_default_affinity(void) +{ +} +#endif + +/* + * Linux has a controller-independent interrupt architecture. + * Every controller has a 'controller-template', that is used + * by the main code to do the right thing. Each driver-visible + * interrupt source is transparently wired to the appropriate + * controller. Thus drivers need not be aware of the + * interrupt-controller. + * + * The code is designed to be easily extended with new/different + * interrupt controllers, without having to do assembly magic or + * having to touch the generic code. 
+ * + * Controller mappings for all interrupt sources: + */ +int nr_irqs = NR_IRQS; +EXPORT_SYMBOL_GPL(nr_irqs); + +#ifdef CONFIG_SPARSE_IRQ + +static struct irq_desc irq_desc_init = { + .irq = -1, + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), +}; + +void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) +{ + void *ptr; + + if (slab_is_available()) + ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), + GFP_ATOMIC, node); + else + ptr = alloc_bootmem_node(NODE_DATA(node), + nr * sizeof(*desc->kstat_irqs)); + + /* + * don't overwite if can not get new one + * init_copy_kstat_irqs() could still use old one + */ + if (ptr) { + printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); + desc->kstat_irqs = ptr; + } +} + +static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) +{ + memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); + + raw_spin_lock_init(&desc->lock); + desc->irq = irq; +#ifdef CONFIG_SMP + desc->node = node; +#endif + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + init_kstat_irqs(desc, node, nr_cpu_ids); + if (!desc->kstat_irqs) { + printk(KERN_ERR "can not alloc kstat_irqs\n"); + BUG_ON(1); + } + if (!alloc_desc_masks(desc, node, false)) { + printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); + BUG_ON(1); + } + init_desc_masks(desc); + arch_init_chip_data(desc, node); +} + +/* + * Protect the sparse_irqs: + */ +DEFINE_RAW_SPINLOCK(sparse_irq_lock); + +struct irq_desc **irq_desc_ptrs __read_mostly; + +static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { + [0 ... NR_IRQS_LEGACY-1] = { + .irq = -1, + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), + } +}; + +static unsigned int *kstat_irqs_legacy; + +int __init early_irq_init(void) +{ + struct irq_desc *desc; + int legacy_count; + int node; + int i; + + init_irq_default_affinity(); + + /* initialize nr_irqs based on nr_cpu_ids */ + arch_probe_nr_irqs(); + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); + + desc = irq_desc_legacy; + legacy_count = ARRAY_SIZE(irq_desc_legacy); + node = first_online_node; + + /* allocate irq_desc_ptrs array based on nr_irqs */ + irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT); + + /* allocate based on nr_cpu_ids */ + kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * + sizeof(int), GFP_NOWAIT, node); + + for (i = 0; i < legacy_count; i++) { + desc[i].irq = i; +#ifdef CONFIG_SMP + desc[i].node = node; +#endif + desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); + alloc_desc_masks(&desc[i], node, true); + init_desc_masks(&desc[i]); + irq_desc_ptrs[i] = desc + i; + } + + for (i = legacy_count; i < nr_irqs; i++) + irq_desc_ptrs[i] = NULL; + + return arch_early_irq_init(); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + if (irq_desc_ptrs && irq < nr_irqs) + return irq_desc_ptrs[irq]; + + return NULL; +} + +struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) +{ + struct irq_desc *desc; + unsigned long flags; + + if (irq >= nr_irqs) { + WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", + irq, nr_irqs); + return NULL; + } + + desc = irq_desc_ptrs[irq]; + if (desc) + return desc; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + + /* We have to check it to avoid races with another 
CPU */ + desc = irq_desc_ptrs[irq]; + if (desc) + goto out_unlock; + + if (slab_is_available()) + desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); + else + desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc)); + + printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); + if (!desc) { + printk(KERN_ERR "can not alloc irq_desc\n"); + BUG_ON(1); + } + init_one_irq_desc(irq, desc, node); + + irq_desc_ptrs[irq] = desc; + +out_unlock: + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + + return desc; +} + +#else /* !CONFIG_SPARSE_IRQ */ + +struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { + [0 ... NR_IRQS-1] = { + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), + } +}; + +static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; +int __init early_irq_init(void) +{ + struct irq_desc *desc; + int count; + int i; + + init_irq_default_affinity(); + + printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); + + desc = irq_desc; + count = ARRAY_SIZE(irq_desc); + + for (i = 0; i < count; i++) { + desc[i].irq = i; + alloc_desc_masks(&desc[i], 0, true); + init_desc_masks(&desc[i]); + desc[i].kstat_irqs = kstat_irqs_all[i]; + } + return arch_early_irq_init(); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return (irq < NR_IRQS) ? irq_desc + irq : NULL; +} + +struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) +{ + return irq_to_desc(irq); +} +#endif /* !CONFIG_SPARSE_IRQ */ + +void clear_kstat_irqs(struct irq_desc *desc) +{ + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); +} + +/* + * What should we do if we get a hw irq event on an illegal vector? + * Each architecture has to answer this themself. 
+ */ +static void ack_bad(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + print_irq_desc(irq, desc); + ack_bad_irq(irq); +} + +/* + * NOP functions + */ +static void noop(unsigned int irq) +{ +} + +static unsigned int noop_ret(unsigned int irq) +{ + return 0; +} + +/* + * Generic no controller implementation + */ +struct irq_chip no_irq_chip = { + .name = "none", + .startup = noop_ret, + .shutdown = noop, + .enable = noop, + .disable = noop, + .ack = ack_bad, + .end = noop, +}; + +/* + * Generic dummy implementation which can be used for + * real dumb interrupt sources + */ +struct irq_chip dummy_irq_chip = { + .name = "dummy", + .startup = noop_ret, + .shutdown = noop, + .enable = noop, + .disable = noop, + .ack = noop, + .mask = noop, + .unmask = noop, + .end = noop, +}; + +/* + * Special, empty irq handler: + */ +irqreturn_t no_action(int cpl, void *dev_id) +{ + return IRQ_NONE; +} + +static void warn_no_thread(unsigned int irq, struct irqaction *action) +{ + if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags)) + return; + + printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD " + "but no thread function available.", irq, action->name); +} + +/** + * handle_IRQ_event - irq action chain handler + * @irq: the interrupt number + * @action: the interrupt action chain for this irq + * + * Handles the action chain of an irq event + */ +irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action) +{ + irqreturn_t ret, retval = IRQ_NONE; + unsigned int status = 0; + + if (!(action->flags & IRQF_DISABLED)) + local_irq_enable_in_hardirq(); + + do { + trace_irq_handler_entry(irq, action); + ret = action->handler(irq, action->dev_id); + trace_irq_handler_exit(irq, action, ret); + + switch (ret) { + case IRQ_WAKE_THREAD: + /* + * Set result to handled so the spurious check + * does not trigger. + */ + ret = IRQ_HANDLED; + + /* + * Catch drivers which return WAKE_THREAD but + * did not set up a thread function + */ + if (unlikely(!action->thread_fn)) { + warn_no_thread(irq, action); + break; + } + + /* + * Wake up the handler thread for this + * action. In case the thread crashed and was + * killed we just pretend that we handled the + * interrupt. The hardirq handler above has + * disabled the device interrupt, so no irq + * storm is lurking. + */ + if (likely(!test_bit(IRQTF_DIED, + &action->thread_flags))) { + set_bit(IRQTF_RUNTHREAD, &action->thread_flags); + wake_up_process(action->thread); + } + + /* Fall through to add to randomness */ + case IRQ_HANDLED: + status |= action->flags; + break; + + default: + break; + } + + retval |= ret; + action = action->next; + } while (action); + + if (status & IRQF_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + local_irq_disable(); + + return retval; +} + +#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ + +#ifdef CONFIG_ENABLE_WARN_DEPRECATED +# warning __do_IRQ is deprecated. Please convert to proper flow handlers +#endif + +/** + * __do_IRQ - original all in one highlevel IRQ handler + * @irq: the interrupt number + * + * __do_IRQ handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + * + * This is the original x86 implementation which is used for every + * interrupt type. 
+ */ +unsigned int __do_IRQ(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + unsigned int status; + + kstat_incr_irqs_this_cpu(irq, desc); + + if (CHECK_IRQ_PER_CPU(desc->status)) { + irqreturn_t action_ret; + + /* + * No locking required for CPU-local interrupts: + */ + if (desc->chip->ack) + desc->chip->ack(irq); + if (likely(!(desc->status & IRQ_DISABLED))) { + action_ret = handle_IRQ_event(irq, desc->action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + } + desc->chip->end(irq); + return 1; + } + + raw_spin_lock(&desc->lock); + if (desc->chip->ack) + desc->chip->ack(irq); + /* + * REPLAY is when Linux resends an IRQ that was dropped earlier + * WAITING is used by probe to mark irqs that are being tested + */ + status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING); + status |= IRQ_PENDING; /* we _want_ to handle it */ + + /* + * If the IRQ is disabled for whatever reason, we cannot + * use the action we have. + */ + action = NULL; + if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) { + action = desc->action; + status &= ~IRQ_PENDING; /* we commit to handling */ + status |= IRQ_INPROGRESS; /* we are handling it */ + } + desc->status = status; + + /* + * If there is no IRQ handler or it was disabled, exit early. + * Since we set PENDING, if another processor is handling + * a different instance of this same irq, the other processor + * will take care of it. + */ + if (unlikely(!action)) + goto out; + + /* + * Edge triggered interrupts need to remember + * pending events. + * This applies to any hw interrupts that allow a second + * instance of the same irq to arrive while we are in do_IRQ + * or in the handler. But the code here only handles the _second_ + * instance of the irq, not the third or fourth. So it is mostly + * useful for irq hardware that does not mask cleanly in an + * SMP environment. + */ + for (;;) { + irqreturn_t action_ret; + + raw_spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); + if (!noirqdebug) + note_interrupt(irq, desc, action_ret); + + raw_spin_lock(&desc->lock); + if (likely(!(desc->status & IRQ_PENDING))) + break; + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + +out: + /* + * The ->end() handler has to deal with interrupts which got + * disabled while the handler was running. + */ + desc->chip->end(irq); + raw_spin_unlock(&desc->lock); + + return 1; +} +#endif + +void early_init_irq_lock_class(void) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + } +} + +unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc ? 
desc->kstat_irqs[cpu] : 0; +} +EXPORT_SYMBOL(kstat_irqs_cpu); + diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h new file mode 100644 index 00000000000..b2821f070a3 --- /dev/null +++ b/kernel/irq/internals.h @@ -0,0 +1,99 @@ +/* + * IRQ subsystem internal functions and variables: + */ + +extern int noirqdebug; + +/* Set default functions for irq_chip structures: */ +extern void irq_chip_set_defaults(struct irq_chip *chip); + +/* Set default handler: */ +extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); + +extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + unsigned long flags); +extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); +extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); + +extern struct lock_class_key irq_desc_lock_class; +extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); +extern void clear_kstat_irqs(struct irq_desc *desc); +extern raw_spinlock_t sparse_irq_lock; + +#ifdef CONFIG_SPARSE_IRQ +/* irq_desc_ptrs allocated at boot time */ +extern struct irq_desc **irq_desc_ptrs; +#else +/* irq_desc_ptrs is a fixed size array */ +extern struct irq_desc *irq_desc_ptrs[NR_IRQS]; +#endif + +#ifdef CONFIG_PROC_FS +extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); +extern void register_handler_proc(unsigned int irq, struct irqaction *action); +extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); +#else +static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } +static inline void register_handler_proc(unsigned int irq, + struct irqaction *action) { } +static inline void unregister_handler_proc(unsigned int irq, + struct irqaction *action) { } +#endif + +extern int irq_select_affinity_usr(unsigned int irq); + +extern void irq_set_thread_affinity(struct irq_desc *desc); + +/* Inline functions for support of irq chips on slow busses */ +static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) +{ + if (unlikely(desc->chip->bus_lock)) + desc->chip->bus_lock(irq); +} + +static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) +{ + if (unlikely(desc->chip->bus_sync_unlock)) + desc->chip->bus_sync_unlock(irq); +} + +/* + * Debugging printout: + */ + +#include <linux/kallsyms.h> + +#define P(f) if (desc->status & f) printk("%14s set\n", #f) + +static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) +{ + printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", + irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); + printk("->handle_irq(): %p, ", desc->handle_irq); + print_symbol("%s\n", (unsigned long)desc->handle_irq); + printk("->chip(): %p, ", desc->chip); + print_symbol("%s\n", (unsigned long)desc->chip); + printk("->action(): %p\n", desc->action); + if (desc->action) { + printk("->action->handler(): %p, ", desc->action->handler); + print_symbol("%s\n", (unsigned long)desc->action->handler); + } + + P(IRQ_INPROGRESS); + P(IRQ_DISABLED); + P(IRQ_PENDING); + P(IRQ_REPLAY); + P(IRQ_AUTODETECT); + P(IRQ_WAITING); + P(IRQ_LEVEL); + P(IRQ_MASKED); +#ifdef CONFIG_IRQ_PER_CPU + P(IRQ_PER_CPU); +#endif + P(IRQ_NOPROBE); + P(IRQ_NOREQUEST); + P(IRQ_NOAUTOEN); +} + +#undef P + diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c new file mode 100644 index 00000000000..0b23ff71b9b --- /dev/null +++ b/kernel/irq/manage.c @@ -0,0 +1,1118 @@ +/* + * linux/kernel/irq/manage.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, 
Ingo Molnar + * Copyright (C) 2005-2006 Thomas Gleixner + * + * This file contains driver APIs to the irq subsystem. + */ + +#include <linux/irq.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#include "internals.h" + +/** + * synchronize_irq - wait for pending IRQ handlers (on other CPUs) + * @irq: interrupt number to wait for + * + * This function waits for any pending IRQ handlers for this interrupt + * to complete before returning. If you use this function while + * holding a resource the IRQ handler may need you will deadlock. + * + * This function may be called - with care - from IRQ context. + */ +void synchronize_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned int status; + + if (!desc) + return; + + do { + unsigned long flags; + + /* + * Wait until we're out of the critical section. This might + * give the wrong answer due to the lack of memory barriers. + */ + while (desc->status & IRQ_INPROGRESS) + cpu_relax(); + + /* Ok, that indicated we're done: double-check carefully. */ + raw_spin_lock_irqsave(&desc->lock, flags); + status = desc->status; + raw_spin_unlock_irqrestore(&desc->lock, flags); + + /* Oops, that failed? */ + } while (status & IRQ_INPROGRESS); + + /* + * We made sure that no hardirq handler is running. Now verify + * that no threaded handlers are active. + */ + wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); +} +EXPORT_SYMBOL(synchronize_irq); + +#ifdef CONFIG_SMP +cpumask_var_t irq_default_affinity; + +/** + * irq_can_set_affinity - Check if the affinity of a given irq can be set + * @irq: Interrupt to check + * + */ +int irq_can_set_affinity(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || + !desc->chip->set_affinity) + return 0; + + return 1; +} + +/** + * irq_set_thread_affinity - Notify irq threads to adjust affinity + * @desc: irq descriptor which has affitnity changed + * + * We just set IRQTF_AFFINITY and delegate the affinity setting + * to the interrupt thread itself. We can not call + * set_cpus_allowed_ptr() here as we hold desc->lock and this + * code can be called from hard interrupt context. 
+ */ +void irq_set_thread_affinity(struct irq_desc *desc) +{ + struct irqaction *action = desc->action; + + while (action) { + if (action->thread) + set_bit(IRQTF_AFFINITY, &action->thread_flags); + action = action->next; + } +} + +/** + * irq_set_affinity - Set the irq affinity of a given irq + * @irq: Interrupt to set affinity + * @cpumask: cpumask + * + */ +int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc->chip->set_affinity) + return -EINVAL; + + raw_spin_lock_irqsave(&desc->lock, flags); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (desc->status & IRQ_MOVE_PCNTXT) { + if (!desc->chip->set_affinity(irq, cpumask)) { + cpumask_copy(desc->affinity, cpumask); + irq_set_thread_affinity(desc); + } + } + else { + desc->status |= IRQ_MOVE_PENDING; + cpumask_copy(desc->pending_mask, cpumask); + } +#else + if (!desc->chip->set_affinity(irq, cpumask)) { + cpumask_copy(desc->affinity, cpumask); + irq_set_thread_affinity(desc); + } +#endif + desc->status |= IRQ_AFFINITY_SET; + raw_spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} + +#ifndef CONFIG_AUTO_IRQ_AFFINITY +/* + * Generic version of the affinity autoselector. + */ +static int setup_affinity(unsigned int irq, struct irq_desc *desc) +{ + if (!irq_can_set_affinity(irq)) + return 0; + + /* + * Preserve an userspace affinity setup, but make sure that + * one of the targets is online. + */ + if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { + if (cpumask_any_and(desc->affinity, cpu_online_mask) + < nr_cpu_ids) + goto set_affinity; + else + desc->status &= ~IRQ_AFFINITY_SET; + } + + cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); +set_affinity: + desc->chip->set_affinity(irq, desc->affinity); + + return 0; +} +#else +static inline int setup_affinity(unsigned int irq, struct irq_desc *d) +{ + return irq_select_affinity(irq); +} +#endif + +/* + * Called when affinity is set via /proc/irq + */ +int irq_select_affinity_usr(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret; + + raw_spin_lock_irqsave(&desc->lock, flags); + ret = setup_affinity(irq, desc); + if (!ret) + irq_set_thread_affinity(desc); + raw_spin_unlock_irqrestore(&desc->lock, flags); + + return ret; +} + +#else +static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) +{ + return 0; +} +#endif + +void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) +{ + if (suspend) { + if (!desc->action || (desc->action->flags & IRQF_TIMER)) + return; + desc->status |= IRQ_SUSPENDED; + } + + if (!desc->depth++) { + desc->status |= IRQ_DISABLED; + desc->chip->disable(irq); + } +} + +/** + * disable_irq_nosync - disable an irq without waiting + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables and Enables are + * nested. + * Unlike disable_irq(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. + * + * This function may be called from IRQ context. 
+ */ +void disable_irq_nosync(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) + return; + + chip_bus_lock(irq, desc); + raw_spin_lock_irqsave(&desc->lock, flags); + __disable_irq(desc, irq, false); + raw_spin_unlock_irqrestore(&desc->lock, flags); + chip_bus_sync_unlock(irq, desc); +} +EXPORT_SYMBOL(disable_irq_nosync); + +/** + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Enables and Disables are + * nested. + * This function waits for any pending IRQ handlers for this interrupt + * to complete before returning. If you use this function while + * holding a resource the IRQ handler may need you will deadlock. + * + * This function may be called - with care - from IRQ context. + */ +void disable_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc) + return; + + disable_irq_nosync(irq); + if (desc->action) + synchronize_irq(irq); +} +EXPORT_SYMBOL(disable_irq); + +void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) +{ + if (resume) + desc->status &= ~IRQ_SUSPENDED; + + switch (desc->depth) { + case 0: + err_out: + WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); + break; + case 1: { + unsigned int status = desc->status & ~IRQ_DISABLED; + + if (desc->status & IRQ_SUSPENDED) + goto err_out; + /* Prevent probing on this irq: */ + desc->status = status | IRQ_NOPROBE; + check_irq_resend(desc, irq); + /* fall-through */ + } + default: + desc->depth--; + } +} + +/** + * enable_irq - enable handling of an irq + * @irq: Interrupt to enable + * + * Undoes the effect of one call to disable_irq(). If this + * matches the last disable, processing of interrupts on this + * IRQ line is re-enabled. + * + * This function may be called from IRQ context only when + * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! + */ +void enable_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + + if (!desc) + return; + + chip_bus_lock(irq, desc); + raw_spin_lock_irqsave(&desc->lock, flags); + __enable_irq(desc, irq, false); + raw_spin_unlock_irqrestore(&desc->lock, flags); + chip_bus_sync_unlock(irq, desc); +} +EXPORT_SYMBOL(enable_irq); + +static int set_irq_wake_real(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_to_desc(irq); + int ret = -ENXIO; + + if (desc->chip->set_wake) + ret = desc->chip->set_wake(irq, on); + + return ret; +} + +/** + * set_irq_wake - control irq power management wakeup + * @irq: interrupt to control + * @on: enable/disable power management wakeup + * + * Enable/disable power management wakeup mode, which is + * disabled by default. Enables and disables must match, + * just as they match for non-wakeup mode support. + * + * Wakeup mode lets this IRQ wake the system from sleep + * states like "suspend to RAM". + */ +int set_irq_wake(unsigned int irq, unsigned int on) +{ + struct irq_desc *desc = irq_to_desc(irq); + unsigned long flags; + int ret = 0; + + /* wakeup-capable irqs can be shared between drivers that + * don't need to have the same sleep mode behaviors. 
+ */ + raw_spin_lock_irqsave(&desc->lock, flags); + if (on) { + if (desc->wake_depth++ == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 0; + else + desc->status |= IRQ_WAKEUP; + } + } else { + if (desc->wake_depth == 0) { + WARN(1, "Unbalanced IRQ %d wake disable\n", irq); + } else if (--desc->wake_depth == 0) { + ret = set_irq_wake_real(irq, on); + if (ret) + desc->wake_depth = 1; + else + desc->status &= ~IRQ_WAKEUP; + } + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} +EXPORT_SYMBOL(set_irq_wake); + +/* + * Internal function that tells the architecture code whether a + * particular irq has been exclusively allocated or is available + * for driver use. + */ +int can_request_irq(unsigned int irq, unsigned long irqflags) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + + if (!desc) + return 0; + + if (desc->status & IRQ_NOREQUEST) + return 0; + + action = desc->action; + if (action) + if (irqflags & action->flags & IRQF_SHARED) + action = NULL; + + return !action; +} + +void compat_irq_chip_set_default_handler(struct irq_desc *desc) +{ + /* + * If the architecture still has not overriden + * the flow handler then zap the default. This + * should catch incorrect flow-type setting. + */ + if (desc->handle_irq == &handle_bad_irq) + desc->handle_irq = NULL; +} + +int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + unsigned long flags) +{ + int ret; + struct irq_chip *chip = desc->chip; + + if (!chip || !chip->set_type) { + /* + * IRQF_TRIGGER_* but the PIC does not support multiple + * flow-types? + */ + pr_debug("No set_type function for IRQ %d (%s)\n", irq, + chip ? (chip->name ? : "unknown") : "unknown"); + return 0; + } + + /* caller masked out all except trigger mode flags */ + ret = chip->set_type(irq, flags); + + if (ret) + pr_err("setting trigger mode %d for irq %u failed (%pF)\n", + (int)flags, irq, chip->set_type); + else { + if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) + flags |= IRQ_LEVEL; + /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ + desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); + desc->status |= flags; + } + + return ret; +} + +/* + * Default primary interrupt handler for threaded interrupts. Is + * assigned as primary handler when request_threaded_irq is called + * with handler == NULL. Useful for oneshot interrupts. + */ +static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) +{ + return IRQ_WAKE_THREAD; +} + +/* + * Primary handler for nested threaded interrupts. Should never be + * called. + */ +static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) +{ + WARN(1, "Primary handler called for nested irq %d\n", irq); + return IRQ_NONE; +} + +static int irq_wait_for_interrupt(struct irqaction *action) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + + if (test_and_clear_bit(IRQTF_RUNTHREAD, + &action->thread_flags)) { + __set_current_state(TASK_RUNNING); + return 0; + } + schedule(); + } + return -1; +} + +/* + * Oneshot interrupts keep the irq line masked until the threaded + * handler finished. unmask if the interrupt has not been disabled and + * is marked MASKED. + */ +static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) +{ +again: + chip_bus_lock(irq, desc); + raw_spin_lock_irq(&desc->lock); + + /* + * Implausible though it may be we need to protect us against + * the following scenario: + * + * The thread is faster done than the hard interrupt handler + * on the other CPU. 
If we unmask the irq line then the + * interrupt can come in again and masks the line, leaves due + * to IRQ_INPROGRESS and the irq line is masked forever. + */ + if (unlikely(desc->status & IRQ_INPROGRESS)) { + raw_spin_unlock_irq(&desc->lock); + chip_bus_sync_unlock(irq, desc); + cpu_relax(); + goto again; + } + + if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { + desc->status &= ~IRQ_MASKED; + desc->chip->unmask(irq); + } + raw_spin_unlock_irq(&desc->lock); + chip_bus_sync_unlock(irq, desc); +} + +#ifdef CONFIG_SMP +/* + * Check whether we need to change the affinity of the interrupt thread. + */ +static void +irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) +{ + cpumask_var_t mask; + + if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) + return; + + /* + * In case we are out of memory we set IRQTF_AFFINITY again and + * try again next time + */ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { + set_bit(IRQTF_AFFINITY, &action->thread_flags); + return; + } + + raw_spin_lock_irq(&desc->lock); + cpumask_copy(mask, desc->affinity); + raw_spin_unlock_irq(&desc->lock); + + set_cpus_allowed_ptr(current, mask); + free_cpumask_var(mask); +} +#else +static inline void +irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } +#endif + +/* + * Interrupt handler thread + */ +static int irq_thread(void *data) +{ + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; + struct irqaction *action = data; + struct irq_desc *desc = irq_to_desc(action->irq); + int wake, oneshot = desc->status & IRQ_ONESHOT; + + sched_setscheduler(current, SCHED_FIFO, ¶m); + current->irqaction = action; + + while (!irq_wait_for_interrupt(action)) { + + irq_thread_check_affinity(desc, action); + + atomic_inc(&desc->threads_active); + + raw_spin_lock_irq(&desc->lock); + if (unlikely(desc->status & IRQ_DISABLED)) { + /* + * CHECKME: We might need a dedicated + * IRQ_THREAD_PENDING flag here, which + * retriggers the thread in check_irq_resend() + * but AFAICT IRQ_PENDING should be fine as it + * retriggers the interrupt itself --- tglx + */ + desc->status |= IRQ_PENDING; + raw_spin_unlock_irq(&desc->lock); + } else { + raw_spin_unlock_irq(&desc->lock); + + action->thread_fn(action->irq, action->dev_id); + + if (oneshot) + irq_finalize_oneshot(action->irq, desc); + } + + wake = atomic_dec_and_test(&desc->threads_active); + + if (wake && waitqueue_active(&desc->wait_for_threads)) + wake_up(&desc->wait_for_threads); + } + + /* + * Clear irqaction. Otherwise exit_irq_thread() would make + * fuzz about an active irq thread going into nirvana. + */ + current->irqaction = NULL; + return 0; +} + +/* + * Called from do_exit() + */ +void exit_irq_thread(void) +{ + struct task_struct *tsk = current; + + if (!tsk->irqaction) + return; + + printk(KERN_ERR + "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", + tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); + + /* + * Set the THREAD DIED flag to prevent further wakeups of the + * soon to be gone threaded handler. + */ + set_bit(IRQTF_DIED, &tsk->irqaction->flags); +} + +/* + * Internal function to register an irqaction - typically used to + * allocate special interrupts that are part of the architecture. 
+ */ +static int +__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) +{ + struct irqaction *old, **old_ptr; + const char *old_name = NULL; + unsigned long flags; + int nested, shared = 0; + int ret; + + if (!desc) + return -EINVAL; + + if (desc->chip == &no_irq_chip) + return -ENOSYS; + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & IRQF_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. + */ + rand_initialize_irq(irq); + } + + /* Oneshot interrupts are not allowed with shared */ + if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) + return -EINVAL; + + /* + * Check whether the interrupt nests into another interrupt + * thread. + */ + nested = desc->status & IRQ_NESTED_THREAD; + if (nested) { + if (!new->thread_fn) + return -EINVAL; + /* + * Replace the primary handler which was provided from + * the driver for non nested interrupt handling by the + * dummy function which warns when called. + */ + new->handler = irq_nested_primary_handler; + } + + /* + * Create a handler thread when a thread function is supplied + * and the interrupt does not nest into another interrupt + * thread. + */ + if (new->thread_fn && !nested) { + struct task_struct *t; + + t = kthread_create(irq_thread, new, "irq/%d-%s", irq, + new->name); + if (IS_ERR(t)) + return PTR_ERR(t); + /* + * We keep the reference to the task struct even if + * the thread dies to avoid that the interrupt code + * references an already freed task_struct. + */ + get_task_struct(t); + new->thread = t; + } + + /* + * The following block of code has to be executed atomically + */ + raw_spin_lock_irqsave(&desc->lock, flags); + old_ptr = &desc->action; + old = *old_ptr; + if (old) { + /* + * Can't share interrupts unless both agree to and are + * the same type (level, edge, polarity). So both flag + * fields must have IRQF_SHARED set and the bits which + * set the trigger type must match. + */ + if (!((old->flags & new->flags) & IRQF_SHARED) || + ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { + old_name = old->name; + goto mismatch; + } + +#if defined(CONFIG_IRQ_PER_CPU) + /* All handlers must agree on per-cpuness */ + if ((old->flags & IRQF_PERCPU) != + (new->flags & IRQF_PERCPU)) + goto mismatch; +#endif + + /* add new interrupt at end of irq queue */ + do { + old_ptr = &old->next; + old = *old_ptr; + } while (old); + shared = 1; + } + + if (!shared) { + irq_chip_set_defaults(desc->chip); + + init_waitqueue_head(&desc->wait_for_threads); + + /* Setup the type (level, edge polarity) if configured: */ + if (new->flags & IRQF_TRIGGER_MASK) { + ret = __irq_set_trigger(desc, irq, + new->flags & IRQF_TRIGGER_MASK); + + if (ret) + goto out_thread; + } else + compat_irq_chip_set_default_handler(desc); +#if defined(CONFIG_IRQ_PER_CPU) + if (new->flags & IRQF_PERCPU) + desc->status |= IRQ_PER_CPU; +#endif + + desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | + IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); + + if (new->flags & IRQF_ONESHOT) + desc->status |= IRQ_ONESHOT; + + /* + * Force MSI interrupts to run with interrupts + * disabled. 
The multi vector cards can cause stack + * overflows due to nested interrupts when enough of + * them are directed to a core and fire at the same + * time. + */ + if (desc->msi_desc) + new->flags |= IRQF_DISABLED; + + if (!(desc->status & IRQ_NOAUTOEN)) { + desc->depth = 0; + desc->status &= ~IRQ_DISABLED; + desc->chip->startup(irq); + } else + /* Undo nested disables: */ + desc->depth = 1; + + /* Exclude IRQ from balancing if requested */ + if (new->flags & IRQF_NOBALANCING) + desc->status |= IRQ_NO_BALANCING; + + /* Set default affinity mask once everything is setup */ + setup_affinity(irq, desc); + + } else if ((new->flags & IRQF_TRIGGER_MASK) + && (new->flags & IRQF_TRIGGER_MASK) + != (desc->status & IRQ_TYPE_SENSE_MASK)) { + /* hope the handler works with the actual trigger mode... */ + pr_warning("IRQ %d uses trigger mode %d; requested %d\n", + irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), + (int)(new->flags & IRQF_TRIGGER_MASK)); + } + + new->irq = irq; + *old_ptr = new; + + /* Reset broken irq detection when installing new handler */ + desc->irq_count = 0; + desc->irqs_unhandled = 0; + + /* + * Check whether we disabled the irq via the spurious handler + * before. Reenable it and give it another chance. + */ + if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { + desc->status &= ~IRQ_SPURIOUS_DISABLED; + __enable_irq(desc, irq, false); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); + + /* + * Strictly no need to wake it up, but hung_task complains + * when no hard interrupt wakes the thread up. + */ + if (new->thread) + wake_up_process(new->thread); + + register_irq_proc(irq, desc); + new->dir = NULL; + register_handler_proc(irq, new); + + return 0; + +mismatch: +#ifdef CONFIG_DEBUG_SHIRQ + if (!(new->flags & IRQF_PROBE_SHARED)) { + printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); + if (old_name) + printk(KERN_ERR "current handler: %s\n", old_name); + dump_stack(); + } +#endif + ret = -EBUSY; + +out_thread: + raw_spin_unlock_irqrestore(&desc->lock, flags); + if (new->thread) { + struct task_struct *t = new->thread; + + new->thread = NULL; + if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) + kthread_stop(t); + put_task_struct(t); + } + return ret; +} + +/** + * setup_irq - setup an interrupt + * @irq: Interrupt line to setup + * @act: irqaction for the interrupt + * + * Used to statically setup interrupts in the early boot process. + */ +int setup_irq(unsigned int irq, struct irqaction *act) +{ + struct irq_desc *desc = irq_to_desc(irq); + + return __setup_irq(irq, desc, act); +} +EXPORT_SYMBOL_GPL(setup_irq); + + /* + * Internal function to unregister an irqaction - used to free + * regular and special interrupts that are part of the architecture. 
+ */ +static struct irqaction *__free_irq(unsigned int irq, void *dev_id) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action, **action_ptr; + unsigned long flags; + + WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); + + if (!desc) + return NULL; + + raw_spin_lock_irqsave(&desc->lock, flags); + + /* + * There can be multiple actions per IRQ descriptor, find the right + * one based on the dev_id: + */ + action_ptr = &desc->action; + for (;;) { + action = *action_ptr; + + if (!action) { + WARN(1, "Trying to free already-free IRQ %d\n", irq); + raw_spin_unlock_irqrestore(&desc->lock, flags); + + return NULL; + } + + if (action->dev_id == dev_id) + break; + action_ptr = &action->next; + } + + /* Found it - now remove it from the list of entries: */ + *action_ptr = action->next; + + /* Currently used only by UML, might disappear one day: */ +#ifdef CONFIG_IRQ_RELEASE_METHOD + if (desc->chip->release) + desc->chip->release(irq, dev_id); +#endif + + /* If this was the last handler, shut down the IRQ line: */ + if (!desc->action) { + desc->status |= IRQ_DISABLED; + if (desc->chip->shutdown) + desc->chip->shutdown(irq); + else + desc->chip->disable(irq); + } + + raw_spin_unlock_irqrestore(&desc->lock, flags); + + unregister_handler_proc(irq, action); + + /* Make sure it's not being used on another CPU: */ + synchronize_irq(irq); + +#ifdef CONFIG_DEBUG_SHIRQ + /* + * It's a shared IRQ -- the driver ought to be prepared for an IRQ + * event to happen even now it's being freed, so let's make sure that + * is so by doing an extra call to the handler .... + * + * ( We do this after actually deregistering it, to make sure that a + * 'real' IRQ doesn't run in * parallel with our fake. ) + */ + if (action->flags & IRQF_SHARED) { + local_irq_save(flags); + action->handler(irq, dev_id); + local_irq_restore(flags); + } +#endif + + if (action->thread) { + if (!test_bit(IRQTF_DIED, &action->thread_flags)) + kthread_stop(action->thread); + put_task_struct(action->thread); + } + + return action; +} + +/** + * remove_irq - free an interrupt + * @irq: Interrupt line to free + * @act: irqaction for the interrupt + * + * Used to remove interrupts statically setup by the early boot process. + */ +void remove_irq(unsigned int irq, struct irqaction *act) +{ + __free_irq(irq, act->dev_id); +} +EXPORT_SYMBOL_GPL(remove_irq); + +/** + * free_irq - free an interrupt allocated with request_irq + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Remove an interrupt handler. The handler is removed and if the + * interrupt line is no longer in use by any driver it is disabled. + * On a shared IRQ the caller must ensure the interrupt is disabled + * on the card it drives before calling this function. The function + * does not return until any executing interrupts for this IRQ + * have completed. + * + * This function must not be called from interrupt context. + */ +void free_irq(unsigned int irq, void *dev_id) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc) + return; + + chip_bus_lock(irq, desc); + kfree(__free_irq(irq, dev_id)); + chip_bus_sync_unlock(irq, desc); +} +EXPORT_SYMBOL(free_irq); + +/** + * request_threaded_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs. 
+ *	Primary handler for threaded interrupts
+ *	If NULL and thread_fn != NULL the default
+ *	primary handler is installed
+ * @thread_fn: Function called from the irq handler thread
+ *	    If NULL, no irq thread is created
+ * @irqflags: Interrupt type flags
+ * @devname: An ASCII name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ *	This call allocates interrupt resources and enables the
+ *	interrupt line and IRQ handling. From the point this
+ *	call is made your handler function may be invoked. Since
+ *	your handler function must clear any interrupt the board
+ *	raises, you must take care both to initialise your hardware
+ *	and to set up the interrupt handler in the right order.
+ *
+ *	If you want to set up a threaded irq handler for your device
+ *	then you need to supply @handler and @thread_fn. @handler is
+ *	still called in hard interrupt context and has to check
+ *	whether the interrupt originates from the device. If yes, it
+ *	needs to disable the interrupt on the device and return
+ *	IRQ_WAKE_THREAD which will wake up the handler thread and run
+ *	@thread_fn. This split handler design is necessary to support
+ *	shared interrupts.
+ *
+ *	Dev_id must be globally unique. Normally the address of the
+ *	device data structure is used as the cookie. Since the handler
+ *	receives this value it makes sense to use it.
+ *
+ *	If your interrupt is shared you must pass a non-NULL dev_id
+ *	as this is required when freeing the interrupt.
+ *
+ *	Flags:
+ *
+ *	IRQF_SHARED		Interrupt is shared
+ *	IRQF_DISABLED		Disable local interrupts while processing
+ *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
+ *	IRQF_TRIGGER_*		Specify active edge(s) or level
+ *
+ */
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+			 irq_handler_t thread_fn, unsigned long irqflags,
+			 const char *devname, void *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	int retval;
+
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+					(IRQF_SHARED|IRQF_DISABLED)) {
+		pr_warning(
+		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+			irq, devname);
+	}
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * Lockdep wants atomic interrupt handlers:
+	 */
+	irqflags |= IRQF_DISABLED;
+#endif
+	/*
+	 * Sanity-check: shared interrupts must pass in a real dev-ID,
+	 * otherwise we'll have trouble later trying to figure out
+	 * which interrupt is which (messes up the interrupt freeing
+	 * logic etc).
+ */ + if ((irqflags & IRQF_SHARED) && !dev_id) + return -EINVAL; + + desc = irq_to_desc(irq); + if (!desc) + return -EINVAL; + + if (desc->status & IRQ_NOREQUEST) + return -EINVAL; + + if (!handler) { + if (!thread_fn) + return -EINVAL; + handler = irq_default_primary_handler; + } + + action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->thread_fn = thread_fn; + action->flags = irqflags; + action->name = devname; + action->dev_id = dev_id; + + chip_bus_lock(irq, desc); + retval = __setup_irq(irq, desc, action); + chip_bus_sync_unlock(irq, desc); + + if (retval) + kfree(action); + +#ifdef CONFIG_DEBUG_SHIRQ + if (!retval && (irqflags & IRQF_SHARED)) { + /* + * It's a shared IRQ -- the driver ought to be prepared for it + * to happen immediately, so let's make sure.... + * We disable the irq to make sure that a 'real' IRQ doesn't + * run in parallel with our fake. + */ + unsigned long flags; + + disable_irq(irq); + local_irq_save(flags); + + handler(irq, dev_id); + + local_irq_restore(flags); + enable_irq(irq); + } +#endif + return retval; +} +EXPORT_SYMBOL(request_threaded_irq); diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c new file mode 100644 index 00000000000..24196228083 --- /dev/null +++ b/kernel/irq/migration.c @@ -0,0 +1,68 @@ + +#include <linux/irq.h> +#include <linux/interrupt.h> + +#include "internals.h" + +void move_masked_irq(int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (likely(!(desc->status & IRQ_MOVE_PENDING))) + return; + + /* + * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. + */ + if (CHECK_IRQ_PER_CPU(desc->status)) { + WARN_ON(1); + return; + } + + desc->status &= ~IRQ_MOVE_PENDING; + + if (unlikely(cpumask_empty(desc->pending_mask))) + return; + + if (!desc->chip->set_affinity) + return; + + assert_raw_spin_locked(&desc->lock); + + /* + * If there was a valid mask to work with, please + * do the disable, re-program, enable sequence. + * This is *not* particularly important for level triggered + * but in a edge trigger case, we might be setting rte + * when an active trigger is comming in. This could + * cause some ioapics to mal-function. + * Being paranoid i guess! + * + * For correct operation this depends on the caller + * masking the irqs. + */ + if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) + < nr_cpu_ids)) + if (!desc->chip->set_affinity(irq, desc->pending_mask)) { + cpumask_copy(desc->affinity, desc->pending_mask); + irq_set_thread_affinity(desc); + } + + cpumask_clear(desc->pending_mask); +} + +void move_native_irq(int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (likely(!(desc->status & IRQ_MOVE_PENDING))) + return; + + if (unlikely(desc->status & IRQ_DISABLED)) + return; + + desc->chip->mask(irq); + move_masked_irq(irq); + desc->chip->unmask(irq); +} + diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c new file mode 100644 index 00000000000..26bac9d8f86 --- /dev/null +++ b/kernel/irq/numa_migrate.c @@ -0,0 +1,119 @@ +/* + * NUMA irq-desc migration code + * + * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to + * the new "home node" of the IRQ. 
+ */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> + +#include "internals.h" + +static void init_copy_kstat_irqs(struct irq_desc *old_desc, + struct irq_desc *desc, + int node, int nr) +{ + init_kstat_irqs(desc, node, nr); + + if (desc->kstat_irqs != old_desc->kstat_irqs) + memcpy(desc->kstat_irqs, old_desc->kstat_irqs, + nr * sizeof(*desc->kstat_irqs)); +} + +static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) +{ + if (old_desc->kstat_irqs == desc->kstat_irqs) + return; + + kfree(old_desc->kstat_irqs); + old_desc->kstat_irqs = NULL; +} + +static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, + struct irq_desc *desc, int node) +{ + memcpy(desc, old_desc, sizeof(struct irq_desc)); + if (!alloc_desc_masks(desc, node, false)) { + printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " + "for migration.\n", irq); + return false; + } + raw_spin_lock_init(&desc->lock); + desc->node = node; + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); + init_copy_desc_masks(old_desc, desc); + arch_init_copy_chip_data(old_desc, desc, node); + return true; +} + +static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) +{ + free_kstat_irqs(old_desc, desc); + free_desc_masks(old_desc, desc); + arch_free_chip_data(old_desc, desc); +} + +static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, + int node) +{ + struct irq_desc *desc; + unsigned int irq; + unsigned long flags; + + irq = old_desc->irq; + + raw_spin_lock_irqsave(&sparse_irq_lock, flags); + + /* We have to check it to avoid races with another CPU */ + desc = irq_desc_ptrs[irq]; + + if (desc && old_desc != desc) + goto out_unlock; + + desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); + if (!desc) { + printk(KERN_ERR "irq %d: can not get new irq_desc " + "for migration.\n", irq); + /* still use old one */ + desc = old_desc; + goto out_unlock; + } + if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { + /* still use old one */ + kfree(desc); + desc = old_desc; + goto out_unlock; + } + + irq_desc_ptrs[irq] = desc; + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + + /* free the old one */ + free_one_irq_desc(old_desc, desc); + kfree(old_desc); + + return desc; + +out_unlock: + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); + + return desc; +} + +struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +{ + /* those static or target node is -1, do not move them */ + if (desc->irq < NR_IRQS_LEGACY || node == -1) + return desc; + + if (desc->node != node) + desc = __real_move_irq_desc(desc, node); + + return desc; +} + diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c new file mode 100644 index 00000000000..0d4005d85b0 --- /dev/null +++ b/kernel/irq/pm.c @@ -0,0 +1,79 @@ +/* + * linux/kernel/irq/pm.c + * + * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + * + * This file contains power management functions related to interrupts. + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/interrupt.h> + +#include "internals.h" + +/** + * suspend_device_irqs - disable all currently enabled interrupt lines + * + * During system-wide suspend or hibernation device drivers need to be prevented + * from receiving interrupts and this function is provided for this purpose. 
+ * It marks all interrupt lines in use, except for the timer ones, as disabled + * and sets the IRQ_SUSPENDED flag for each of them. + */ +void suspend_device_irqs(void) +{ + struct irq_desc *desc; + int irq; + + for_each_irq_desc(irq, desc) { + unsigned long flags; + + raw_spin_lock_irqsave(&desc->lock, flags); + __disable_irq(desc, irq, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } + + for_each_irq_desc(irq, desc) + if (desc->status & IRQ_SUSPENDED) + synchronize_irq(irq); +} +EXPORT_SYMBOL_GPL(suspend_device_irqs); + +/** + * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() + * + * Enable all interrupt lines previously disabled by suspend_device_irqs() that + * have the IRQ_SUSPENDED flag set. + */ +void resume_device_irqs(void) +{ + struct irq_desc *desc; + int irq; + + for_each_irq_desc(irq, desc) { + unsigned long flags; + + if (!(desc->status & IRQ_SUSPENDED)) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + __enable_irq(desc, irq, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} +EXPORT_SYMBOL_GPL(resume_device_irqs); + +/** + * check_wakeup_irqs - check if any wake-up interrupts are pending + */ +int check_wakeup_irqs(void) +{ + struct irq_desc *desc; + int irq; + + for_each_irq_desc(irq, desc) + if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING)) + return -EBUSY; + + return 0; +} diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c new file mode 100644 index 00000000000..6f50eccc79c --- /dev/null +++ b/kernel/irq/proc.c @@ -0,0 +1,280 @@ +/* + * linux/kernel/irq/proc.c + * + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar + * + * This file contains the /proc/irq/ handling code. + */ + +#include <linux/irq.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/interrupt.h> + +#include "internals.h" + +static struct proc_dir_entry *root_irq_dir; + +#ifdef CONFIG_SMP + +static int irq_affinity_proc_show(struct seq_file *m, void *v) +{ + struct irq_desc *desc = irq_to_desc((long)m->private); + const struct cpumask *mask = desc->affinity; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (desc->status & IRQ_MOVE_PENDING) + mask = desc->pending_mask; +#endif + seq_cpumask(m, mask); + seq_putc(m, '\n'); + return 0; +} + +#ifndef is_affinity_mask_valid +#define is_affinity_mask_valid(val) 1 +#endif + +int no_irq_affinity; +static ssize_t irq_affinity_proc_write(struct file *file, + const char __user *buffer, size_t count, loff_t *pos) +{ + unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; + cpumask_var_t new_value; + int err; + + if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || + irq_balancing_disabled(irq)) + return -EIO; + + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) + return -ENOMEM; + + err = cpumask_parse_user(buffer, count, new_value); + if (err) + goto free_cpumask; + + if (!is_affinity_mask_valid(new_value)) { + err = -EINVAL; + goto free_cpumask; + } + + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least + * one online CPU still has to be targeted. + */ + if (!cpumask_intersects(new_value, cpu_online_mask)) { + /* Special case for empty set - allow the architecture + code to set default SMP affinity. */ + err = irq_select_affinity_usr(irq) ? 
-EINVAL : count; + } else { + irq_set_affinity(irq, new_value); + err = count; + } + +free_cpumask: + free_cpumask_var(new_value); + return err; +} + +static int irq_affinity_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, irq_affinity_proc_show, PDE(inode)->data); +} + +static const struct file_operations irq_affinity_proc_fops = { + .open = irq_affinity_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = irq_affinity_proc_write, +}; + +static int default_affinity_show(struct seq_file *m, void *v) +{ + seq_cpumask(m, irq_default_affinity); + seq_putc(m, '\n'); + return 0; +} + +static ssize_t default_affinity_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + cpumask_var_t new_value; + int err; + + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) + return -ENOMEM; + + err = cpumask_parse_user(buffer, count, new_value); + if (err) + goto out; + + if (!is_affinity_mask_valid(new_value)) { + err = -EINVAL; + goto out; + } + + /* + * Do not allow disabling IRQs completely - it's a too easy + * way to make the system unusable accidentally :-) At least + * one online CPU still has to be targeted. + */ + if (!cpumask_intersects(new_value, cpu_online_mask)) { + err = -EINVAL; + goto out; + } + + cpumask_copy(irq_default_affinity, new_value); + err = count; + +out: + free_cpumask_var(new_value); + return err; +} + +static int default_affinity_open(struct inode *inode, struct file *file) +{ + return single_open(file, default_affinity_show, PDE(inode)->data); +} + +static const struct file_operations default_affinity_proc_fops = { + .open = default_affinity_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = default_affinity_write, +}; +#endif + +static int irq_spurious_proc_show(struct seq_file *m, void *v) +{ + struct irq_desc *desc = irq_to_desc((long) m->private); + + seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", + desc->irq_count, desc->irqs_unhandled, + jiffies_to_msecs(desc->last_unhandled)); + return 0; +} + +static int irq_spurious_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, irq_spurious_proc_show, NULL); +} + +static const struct file_operations irq_spurious_proc_fops = { + .open = irq_spurious_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#define MAX_NAMELEN 128 + +static int name_unique(unsigned int irq, struct irqaction *new_action) +{ + struct irq_desc *desc = irq_to_desc(irq); + struct irqaction *action; + unsigned long flags; + int ret = 1; + + raw_spin_lock_irqsave(&desc->lock, flags); + for (action = desc->action ; action; action = action->next) { + if ((action != new_action) && action->name && + !strcmp(new_action->name, action->name)) { + ret = 0; + break; + } + } + raw_spin_unlock_irqrestore(&desc->lock, flags); + return ret; +} + +void register_handler_proc(unsigned int irq, struct irqaction *action) +{ + char name [MAX_NAMELEN]; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc->dir || action->dir || !action->name || + !name_unique(irq, action)) + return; + + memset(name, 0, MAX_NAMELEN); + snprintf(name, MAX_NAMELEN, "%s", action->name); + + /* create /proc/irq/1234/handler/ */ + action->dir = proc_mkdir(name, desc->dir); +} + +#undef MAX_NAMELEN + +#define MAX_NAMELEN 10 + +void register_irq_proc(unsigned int irq, struct irq_desc *desc) +{ + char name [MAX_NAMELEN]; + + if (!root_irq_dir || (desc->chip == &no_irq_chip) || 
desc->dir) + return; + + memset(name, 0, MAX_NAMELEN); + sprintf(name, "%d", irq); + + /* create /proc/irq/1234 */ + desc->dir = proc_mkdir(name, root_irq_dir); + if (!desc->dir) + return; + +#ifdef CONFIG_SMP + /* create /proc/irq/<irq>/smp_affinity */ + proc_create_data("smp_affinity", 0600, desc->dir, + &irq_affinity_proc_fops, (void *)(long)irq); +#endif + + proc_create_data("spurious", 0444, desc->dir, + &irq_spurious_proc_fops, (void *)(long)irq); +} + +#undef MAX_NAMELEN + +void unregister_handler_proc(unsigned int irq, struct irqaction *action) +{ + if (action->dir) { + struct irq_desc *desc = irq_to_desc(irq); + + remove_proc_entry(action->dir->name, desc->dir); + } +} + +static void register_default_affinity_proc(void) +{ +#ifdef CONFIG_SMP + proc_create("irq/default_smp_affinity", 0600, NULL, + &default_affinity_proc_fops); +#endif +} + +void init_irq_proc(void) +{ + unsigned int irq; + struct irq_desc *desc; + + /* create /proc/irq */ + root_irq_dir = proc_mkdir("irq", NULL); + if (!root_irq_dir) + return; + + register_default_affinity_proc(); + + /* + * Create entries for all existing IRQs. + */ + for_each_irq_desc(irq, desc) { + if (!desc) + continue; + + register_irq_proc(irq, desc); + } +} + diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c new file mode 100644 index 00000000000..090c3763f3a --- /dev/null +++ b/kernel/irq/resend.c @@ -0,0 +1,81 @@ +/* + * linux/kernel/irq/resend.c + * + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar + * Copyright (C) 2005-2006, Thomas Gleixner + * + * This file contains the IRQ-resend code + * + * If the interrupt is waiting to be processed, we try to re-run it. + * We can't directly run it from here since the caller might be in an + * interrupt-protected region. Not all irq controller chips can + * retrigger interrupts at the hardware level, so in those cases + * we allow the resending of IRQs via a tasklet. + */ + +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/interrupt.h> + +#include "internals.h" + +#ifdef CONFIG_HARDIRQS_SW_RESEND + +/* Bitmap to handle software resend of interrupts: */ +static DECLARE_BITMAP(irqs_resend, NR_IRQS); + +/* + * Run software resends of IRQ's + */ +static void resend_irqs(unsigned long arg) +{ + struct irq_desc *desc; + int irq; + + while (!bitmap_empty(irqs_resend, nr_irqs)) { + irq = find_first_bit(irqs_resend, nr_irqs); + clear_bit(irq, irqs_resend); + desc = irq_to_desc(irq); + local_irq_disable(); + desc->handle_irq(irq, desc); + local_irq_enable(); + } +} + +/* Tasklet to handle resend: */ +static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0); + +#endif + +/* + * IRQ resend + * + * Is called with interrupts disabled and desc->lock held. + */ +void check_irq_resend(struct irq_desc *desc, unsigned int irq) +{ + unsigned int status = desc->status; + + /* + * Make sure the interrupt is enabled, before resending it: + */ + desc->chip->enable(irq); + + /* + * We do not resend level type interrupts. Level type + * interrupts are resent by hardware when they are still + * active. 
+ */ + if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; + + if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { +#ifdef CONFIG_HARDIRQS_SW_RESEND + /* Set it pending and activate the softirq: */ + set_bit(irq, irqs_resend); + tasklet_schedule(&resend_tasklet); +#endif + } + } +} diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c new file mode 100644 index 00000000000..89fb90ae534 --- /dev/null +++ b/kernel/irq/spurious.c @@ -0,0 +1,301 @@ +/* + * linux/kernel/irq/spurious.c + * + * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar + * + * This file contains spurious interrupt handling. + */ + +#include <linux/jiffies.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/interrupt.h> +#include <linux/moduleparam.h> +#include <linux/timer.h> + +static int irqfixup __read_mostly; + +#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) +static void poll_spurious_irqs(unsigned long dummy); +static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); + +/* + * Recovery handler for misrouted interrupts. + */ +static int try_one_irq(int irq, struct irq_desc *desc) +{ + struct irqaction *action; + int ok = 0, work = 0; + + raw_spin_lock(&desc->lock); + /* Already running on another processor */ + if (desc->status & IRQ_INPROGRESS) { + /* + * Already running: If it is shared get the other + * CPU to go looking for our mystery interrupt too + */ + if (desc->action && (desc->action->flags & IRQF_SHARED)) + desc->status |= IRQ_PENDING; + raw_spin_unlock(&desc->lock); + return ok; + } + /* Honour the normal IRQ locking */ + desc->status |= IRQ_INPROGRESS; + action = desc->action; + raw_spin_unlock(&desc->lock); + + while (action) { + /* Only shared IRQ handlers are safe to call */ + if (action->flags & IRQF_SHARED) { + if (action->handler(irq, action->dev_id) == + IRQ_HANDLED) + ok = 1; + } + action = action->next; + } + local_irq_disable(); + /* Now clean up the flags */ + raw_spin_lock(&desc->lock); + action = desc->action; + + /* + * While we were looking for a fixup someone queued a real + * IRQ clashing with our walk: + */ + while ((desc->status & IRQ_PENDING) && action) { + /* + * Perform real IRQ processing for the IRQ we deferred + */ + work = 1; + raw_spin_unlock(&desc->lock); + handle_IRQ_event(irq, action); + raw_spin_lock(&desc->lock); + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; + /* + * If we did actual work for the real IRQ line we must let the + * IRQ controller clean up too + */ + if (work && desc->chip && desc->chip->end) + desc->chip->end(irq); + raw_spin_unlock(&desc->lock); + + return ok; +} + +static int misrouted_irq(int irq) +{ + struct irq_desc *desc; + int i, ok = 0; + + for_each_irq_desc(i, desc) { + if (!i) + continue; + + if (i == irq) /* Already tried */ + continue; + + if (try_one_irq(i, desc)) + ok = 1; + } + /* So the caller can adjust the irq error counts */ + return ok; +} + +static void poll_spurious_irqs(unsigned long dummy) +{ + struct irq_desc *desc; + int i; + + for_each_irq_desc(i, desc) { + unsigned int status; + + if (!i) + continue; + + /* Racy but it doesn't matter */ + status = desc->status; + barrier(); + if (!(status & IRQ_SPURIOUS_DISABLED)) + continue; + + local_irq_disable(); + try_one_irq(i, desc); + local_irq_enable(); + } + + mod_timer(&poll_spurious_irq_timer, + jiffies + POLL_SPURIOUS_IRQ_INTERVAL); +} + +/* + * If 99,900 of the previous 100,000 interrupts have not been 
handled + * then assume that the IRQ is stuck in some manner. Drop a diagnostic + * and try to turn the IRQ off. + * + * (The other 100-of-100,000 interrupts may have been a correctly + * functioning device sharing an IRQ with the failing one) + * + * Called under desc->lock + */ + +static void +__report_bad_irq(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) +{ + struct irqaction *action; + + if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { + printk(KERN_ERR "irq event %d: bogus return value %x\n", + irq, action_ret); + } else { + printk(KERN_ERR "irq %d: nobody cared (try booting with " + "the \"irqpoll\" option)\n", irq); + } + dump_stack(); + printk(KERN_ERR "handlers:\n"); + + action = desc->action; + while (action) { + printk(KERN_ERR "[<%p>]", action->handler); + print_symbol(" (%s)", + (unsigned long)action->handler); + printk("\n"); + action = action->next; + } +} + +static void +report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) +{ + static int count = 100; + + if (count > 0) { + count--; + __report_bad_irq(irq, desc, action_ret); + } +} + +static inline int +try_misrouted_irq(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) +{ + struct irqaction *action; + + if (!irqfixup) + return 0; + + /* We didn't actually handle the IRQ - see if it was misrouted? */ + if (action_ret == IRQ_NONE) + return 1; + + /* + * But for 'irqfixup == 2' we also do it for handled interrupts if + * they are marked as IRQF_IRQPOLL (or for irq zero, which is the + * traditional PC timer interrupt.. Legacy) + */ + if (irqfixup < 2) + return 0; + + if (!irq) + return 1; + + /* + * Since we don't get the descriptor lock, "action" can + * change under us. We don't really care, but we don't + * want to follow a NULL pointer. So tell the compiler to + * just load it once by using a barrier. 
+ */ + action = desc->action; + barrier(); + return action && (action->flags & IRQF_IRQPOLL); +} + +void note_interrupt(unsigned int irq, struct irq_desc *desc, + irqreturn_t action_ret) +{ + if (unlikely(action_ret != IRQ_HANDLED)) { + /* + * If we are seeing only the odd spurious IRQ caused by + * bus asynchronicity then don't eventually trigger an error, + * otherwise the counter becomes a doomsday timer for otherwise + * working systems + */ + if (time_after(jiffies, desc->last_unhandled + HZ/10)) + desc->irqs_unhandled = 1; + else + desc->irqs_unhandled++; + desc->last_unhandled = jiffies; + if (unlikely(action_ret != IRQ_NONE)) + report_bad_irq(irq, desc, action_ret); + } + + if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { + int ok = misrouted_irq(irq); + if (action_ret == IRQ_NONE) + desc->irqs_unhandled -= ok; + } + + desc->irq_count++; + if (likely(desc->irq_count < 100000)) + return; + + desc->irq_count = 0; + if (unlikely(desc->irqs_unhandled > 99900)) { + /* + * The interrupt is stuck + */ + __report_bad_irq(irq, desc, action_ret); + /* + * Now kill the IRQ + */ + printk(KERN_EMERG "Disabling IRQ #%d\n", irq); + desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; + desc->depth++; + desc->chip->disable(irq); + + mod_timer(&poll_spurious_irq_timer, + jiffies + POLL_SPURIOUS_IRQ_INTERVAL); + } + desc->irqs_unhandled = 0; +} + +int noirqdebug __read_mostly; + +int noirqdebug_setup(char *str) +{ + noirqdebug = 1; + printk(KERN_INFO "IRQ lockup detection disabled\n"); + + return 1; +} + +__setup("noirqdebug", noirqdebug_setup); +module_param(noirqdebug, bool, 0644); +MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); + +static int __init irqfixup_setup(char *str) +{ + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); + + return 1; +} + +__setup("irqfixup", irqfixup_setup); +module_param(irqfixup, int, 0644); + +static int __init irqpoll_setup(char *str) +{ + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); + printk(KERN_WARNING "This may significantly impact system " + "performance\n"); + return 1; +} + +__setup("irqpoll", irqpoll_setup); |
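
The kernel-doc for request_threaded_irq() above describes the split primary/threaded handler model. As a rough driver-side sketch only (not taken from this commit; the foo_dev device, its register offsets and the IRQ wiring are hypothetical), usage could look like:

#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev {
	void __iomem *regs;	/* hypothetical MMIO window */
	int irq;
};

#define FOO_STATUS	0x00	/* assumed register layout */
#define FOO_MASK	0x04

/* Primary handler: hard-irq context, must not sleep. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!(readl(foo->regs + FOO_STATUS) & 0x1))
		return IRQ_NONE;		/* not ours on a shared line */

	writel(0x1, foo->regs + FOO_MASK);	/* silence the device */
	return IRQ_WAKE_THREAD;			/* defer to foo_thread_fn() */
}

/* Threaded handler: may sleep, e.g. to talk over a slow bus. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* ... handle the event ... */
	writel(0x0, foo->regs + FOO_MASK);	/* re-enable on the device */
	return IRQ_HANDLED;
}

static int foo_request(struct foo_dev *foo)
{
	/* dev_id must be unique; the device structure is the usual cookie */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}

static void foo_release(struct foo_dev *foo)
{
	free_irq(foo->irq, foo);	/* waits for in-flight handlers */
}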
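
When no primary handler is supplied, __setup_irq() installs irq_default_primary_handler(), which simply returns IRQ_WAKE_THREAD; combined with IRQF_ONESHOT the line stays masked until the thread function returns, which is what irq_finalize_oneshot() above implements. A minimal sketch with hypothetical names (note that __setup_irq() rejects IRQF_ONESHOT together with IRQF_SHARED):

#include <linux/interrupt.h>

/* Runs in the irq thread; the line is kept masked until this returns. */
static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	/* may sleep: e.g. read and clear status over a slow bus */
	return IRQ_HANDLED;
}

static int bar_request(unsigned int irq, void *bar)
{
	return request_threaded_irq(irq, NULL /* default primary handler */,
				    bar_thread_fn, IRQF_ONESHOT, "bar", bar);
}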
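
The set_irq_wake() fragment at the start of this section keeps a per-descriptor wake_depth refcount and sets or clears IRQ_WAKEUP, which check_wakeup_irqs() in pm.c later inspects for pending wake interrupts. A hypothetical, balanced enable/disable pairing around suspend (names assumed, not from the commit) might look like:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* IRQ line of an assumed wakeup-capable device. */
static unsigned int foo_wake_irq;

static int foo_suspend_prepare(void)
{
	/* first enable: wake_depth 0 -> 1, IRQ_WAKEUP becomes set */
	return set_irq_wake(foo_wake_irq, 1);
}

static int foo_resume_finish(void)
{
	/* must stay balanced; an extra disable hits the "Unbalanced" WARN */
	return set_irq_wake(foo_wake_irq, 0);
}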