Diffstat (limited to 'arch/i386/kernel')
-rw-r--r-- | arch/i386/kernel/acpi/boot.c | 20
-rw-r--r-- | arch/i386/kernel/acpi/cstate.c | 122
-rw-r--r-- | arch/i386/kernel/acpi/earlyquirk.c | 8
-rw-r--r-- | arch/i386/kernel/alternative.c | 4
-rw-r--r-- | arch/i386/kernel/apic.c | 18
-rw-r--r-- | arch/i386/kernel/apm.c | 39
-rw-r--r-- | arch/i386/kernel/cpu/mcheck/therm_throt.c | 21
-rw-r--r-- | arch/i386/kernel/head.S | 2
-rw-r--r-- | arch/i386/kernel/i8253.c | 2
-rw-r--r-- | arch/i386/kernel/i8259.c | 11
-rw-r--r-- | arch/i386/kernel/io_apic.c | 127
-rw-r--r-- | arch/i386/kernel/irq.c | 14
-rw-r--r-- | arch/i386/kernel/kprobes.c | 22
-rw-r--r-- | arch/i386/kernel/microcode.c | 10
-rw-r--r-- | arch/i386/kernel/nmi.c | 10
-rw-r--r-- | arch/i386/kernel/process.c | 26
-rw-r--r-- | arch/i386/kernel/setup.c | 15
-rw-r--r-- | arch/i386/kernel/smp.c | 6
-rw-r--r-- | arch/i386/kernel/syscall_table.S | 1
-rw-r--r-- | arch/i386/kernel/time.c | 10
-rw-r--r-- | arch/i386/kernel/time_hpet.c | 4
-rw-r--r-- | arch/i386/kernel/traps.c | 10
-rw-r--r-- | arch/i386/kernel/tsc.c | 6
-rw-r--r-- | arch/i386/kernel/vm86.c | 2
-rw-r--r-- | arch/i386/kernel/vmlinux.lds.S | 9
25 files changed, 386 insertions, 133 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 92f79cdd9a4..d12fb97a533 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -70,7 +70,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
 
 #define PREFIX			"ACPI: "
 
-int acpi_noirq __initdata;	/* skip ACPI IRQ initialization */
+int acpi_noirq;			/* skip ACPI IRQ initialization */
 int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
 int acpi_ht __initdata = 1;	/* enable HT */
 
@@ -82,6 +82,7 @@ EXPORT_SYMBOL(acpi_strict);
 acpi_interrupt_flags acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
 int acpi_skip_timer_override __initdata;
+int acpi_use_timer_override __initdata;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -332,7 +333,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
 /*
  * Parse Interrupt Source Override for the ACPI SCI
  */
-static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
 {
 	if (trigger == 0)	/* compatible SCI trigger is level */
 		trigger = 3;
@@ -352,13 +353,13 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 	 * If GSI is < 16, this will update its flags,
 	 * else it will create a new mp_irqs[] entry.
 	 */
-	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
 
 	/*
 	 * stash over-ride to indicate we've been here
 	 * and for later update of acpi_fadt
 	 */
-	acpi_sci_override_gsi = gsi;
+	acpi_sci_override_gsi = bus_irq;
 	return;
 }
 
@@ -376,7 +377,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
 	acpi_table_print_madt_entry(header);
 
 	if (intsrc->bus_irq == acpi_fadt.sci_int) {
-		acpi_sci_ioapic_setup(intsrc->global_irq,
+		acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
 				      intsrc->flags.polarity,
 				      intsrc->flags.trigger);
 		return 0;
@@ -879,7 +880,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 	 * pretend we got one so we can set the SCI flags.
 	 */
 	if (!acpi_sci_override_gsi)
-		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+		acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
 
 	/* Fill in identity legacy mapings where no override */
 	mp_config_acpi_legacy_irqs();
@@ -1300,6 +1301,13 @@ static int __init parse_acpi_skip_timer_override(char *arg)
 	return 0;
 }
 early_param("acpi_skip_timer_override", parse_acpi_skip_timer_override);
+
+static int __init parse_acpi_use_timer_override(char *arg)
+{
+	acpi_use_timer_override = 1;
+	return 0;
+}
+early_param("acpi_use_timer_override", parse_acpi_use_timer_override);
 #endif /* CONFIG_X86_IO_APIC */
 
 static int __init setup_acpi_sci(char *s)
diff --git a/arch/i386/kernel/acpi/cstate.c b/arch/i386/kernel/acpi/cstate.c
index 25db49ef177..20563e52c62 100644
--- a/arch/i386/kernel/acpi/cstate.c
+++ b/arch/i386/kernel/acpi/cstate.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/cpu.h>
 
 #include <acpi/processor.h>
 #include <asm/acpi.h>
@@ -41,5 +42,124 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 		flags->bm_check = 1;
 	}
 }
-
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
+
+/* The code below handles cstate entry with monitor-mwait pair on Intel*/
+
+struct cstate_entry_s {
+	struct {
+		unsigned int eax;
+		unsigned int ecx;
+	} states[ACPI_PROCESSOR_MAX_POWER];
+};
+static struct cstate_entry_s *cpu_cstate_entry;	/* per CPU ptr */
+
+static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
+
+#define MWAIT_SUBSTATE_MASK	(0xf)
+#define MWAIT_SUBSTATE_SIZE	(4)
+
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK	(0x2)
+
+#define MWAIT_ECX_INTERRUPT_BREAK	(0x1)
+
+#define NATIVE_CSTATE_BEYOND_HALT	(2)
+
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+	struct cstate_entry_s *percpu_entry;
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+
+	cpumask_t saved_mask;
+	int retval;
+	unsigned int eax, ebx, ecx, edx;
+	unsigned int edx_part;
+	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
+	unsigned int num_cstate_subtype;
+
+	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF )
+		return -1;
+
+	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
+		return -1;
+
+	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+	percpu_entry->states[cx->index].eax = 0;
+	percpu_entry->states[cx->index].ecx = 0;
+
+	/* Make sure we are running on right CPU */
+	saved_mask = current->cpus_allowed;
+	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (retval)
+		return -1;
+
+	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+	/* Check whether this particular cx_type (in CST) is supported or not */
+	cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
+	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
+
+	retval = 0;
+	if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+		retval = -1;
+		goto out;
+	}
+
+	/* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
+	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
+		retval = -1;
+		goto out;
+	}
+	percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+
+	/* Use the hint in CST */
+	percpu_entry->states[cx->index].eax = cx->address;
+
+	if (!mwait_supported[cstate_type]) {
+		mwait_supported[cstate_type] = 1;
+		printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
+		       "state\n", cx->type);
+	}
+
+out:
+	set_cpus_allowed(current, saved_mask);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+
+void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+{
+	unsigned int cpu = smp_processor_id();
+	struct cstate_entry_s *percpu_entry;
+
+	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
+			      percpu_entry->states[cx->index].ecx);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
+
+static int __init ffh_cstate_init(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	if (c->x86_vendor != X86_VENDOR_INTEL)
+		return -1;
+
+	cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+	return 0;
+}
+
+static void __exit ffh_cstate_exit(void)
+{
+	if (cpu_cstate_entry) {
+		free_percpu(cpu_cstate_entry);
+		cpu_cstate_entry = NULL;
+	}
+}
+
+arch_initcall(ffh_cstate_init);
+__exitcall(ffh_cstate_exit);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index fe799b11ac0..c9841692bb7 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -27,11 +27,17 @@ static int __init check_bridge(int vendor, int device)
 #ifdef CONFIG_ACPI
 	/* According to Nvidia all timer overrides are bogus unless HPET
 	   is enabled. */
-	if (vendor == PCI_VENDOR_ID_NVIDIA) {
+	if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
 		nvidia_hpet_detected = 0;
 		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
 		if (nvidia_hpet_detected == 0) {
 			acpi_skip_timer_override = 1;
+			printk(KERN_INFO "Nvidia board "
+			       "detected. Ignoring ACPI "
+			       "timer override.\n");
+			printk(KERN_INFO "If you got timer trouble "
+			       "try acpi_use_timer_override\n");
+		}
 	}
 #endif
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 28ab8064976..583c238e17f 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -344,6 +344,7 @@ void alternatives_smp_switch(int smp)
 
 void __init alternative_instructions(void)
 {
+	unsigned long flags;
 	if (no_replacement) {
 		printk(KERN_INFO "(SMP-)alternatives turned off\n");
 		free_init_pages("SMP alternatives",
@@ -351,6 +352,8 @@ void __init alternative_instructions(void)
 				(unsigned long)__smp_alt_end);
 		return;
 	}
+
+	local_irq_save(flags);
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/* switch to patch-once-at-boottime-only mode and free the
@@ -386,4 +389,5 @@ void __init alternative_instructions(void)
 		alternatives_smp_switch(0);
 	}
 #endif
+	local_irq_restore(flags);
 }
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 90faae5c5d3..2fd4b7d927c 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1193,11 +1193,11 @@ EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
  * value into /proc/profile.
  */
 
-inline void smp_local_timer_interrupt(struct pt_regs * regs)
+inline void smp_local_timer_interrupt(void)
 {
-	profile_tick(CPU_PROFILING, regs);
+	profile_tick(CPU_PROFILING);
 #ifdef CONFIG_SMP
-	update_process_times(user_mode_vm(regs));
+	update_process_times(user_mode_vm(get_irq_regs()));
 #endif
 
 	/*
@@ -1223,6 +1223,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 
 fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
 
 	/*
@@ -1241,12 +1242,13 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
 	irq_enter();
-	smp_local_timer_interrupt(regs);
+	smp_local_timer_interrupt();
 	irq_exit();
+	set_irq_regs(old_regs);
 }
 
 #ifndef CONFIG_SMP
-static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+static void up_apic_timer_interrupt_call(void)
 {
 	int cpu = smp_processor_id();
 
@@ -1255,11 +1257,11 @@ static void up_apic_timer_interrupt_call(struct pt_regs *regs)
 	 */
 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
 
-	smp_local_timer_interrupt(regs);
+	smp_local_timer_interrupt();
 }
 #endif
 
-void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+void smp_send_timer_broadcast_ipi(void)
 {
 	cpumask_t mask;
 
@@ -1272,7 +1274,7 @@ void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
 		 * We can directly call the apic timer interrupt handler
 		 * in UP case. Minus all irq related functions
 		 */
-		up_apic_timer_interrupt_call(regs);
+		up_apic_timer_interrupt_call();
 #endif
 	}
 }
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b42f2d914af..a60358fe9a4 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -198,7 +198,7 @@
  *	(APM) BIOS Interface Specification, Revision 1.2, February 1996.
  *
  *	[This document is available from Microsoft at:
- *	    http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ *	    http://www.microsoft.com/whdc/archive/amp_12.mspx]
  */
 
 #include <linux/module.h>
@@ -540,11 +540,30 @@ static inline void apm_restore_cpus(cpumask_t mask)
  *	Also, we KNOW that for the non error case of apm_bios_call, there
  *	is no useful data returned in the low order 8 bits of eax.
  */
-#define APM_DO_CLI	\
-	if (apm_info.allow_ints) \
-		local_irq_enable(); \
-	else \
+
+static inline unsigned long __apm_irq_save(void)
+{
+	unsigned long flags;
+	local_save_flags(flags);
+	if (apm_info.allow_ints) {
+		if (irqs_disabled_flags(flags))
+			local_irq_enable();
+	} else
+		local_irq_disable();
+
+	return flags;
+}
+
+#define apm_irq_save(flags) \
+	do { flags = __apm_irq_save(); } while (0)
+
+static inline void apm_irq_restore(unsigned long flags)
+{
+	if (irqs_disabled_flags(flags))
 		local_irq_disable();
+	else if (irqs_disabled())
+		local_irq_enable();
+}
 
 #ifdef APM_ZERO_SEGS
 #	define APM_DECL_SEGS \
@@ -596,12 +615,11 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
-	local_save_flags(flags);
-	APM_DO_CLI;
+	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
 	APM_DO_RESTORE_SEGS;
-	local_irq_restore(flags);
+	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
@@ -640,12 +658,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
 	save_desc_40 = gdt[0x40 / 8];
 	gdt[0x40 / 8] = bad_bios_desc;
 
-	local_save_flags(flags);
-	APM_DO_CLI;
+	apm_irq_save(flags);
 	APM_DO_SAVE_SEGS;
 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
 	APM_DO_RESTORE_SEGS;
-	local_irq_restore(flags);
+	apm_irq_restore(flags);
 	gdt[0x40 / 8] = save_desc_40;
 	put_cpu();
 	apm_restore_cpus(cpus);
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 4f43047de40..2d8703b7ce6 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -110,17 +110,15 @@ int therm_throt_process(int curr)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device * sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
 {
-	sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
-	return 0;
+	return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static __cpuinit int thermal_throttle_remove_dev(struct sys_device * sys_dev)
+static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
-	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
-	return 0;
+	return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
 /* Mutex protecting device creation against CPU hotplug */
@@ -133,12 +131,14 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct sys_device *sys_dev;
+	int err;
 
 	sys_dev = get_cpu_sysdev(cpu);
 	mutex_lock(&therm_cpu_lock);
 	switch (action) {
 	case CPU_ONLINE:
-		thermal_throttle_add_dev(sys_dev);
+		err = thermal_throttle_add_dev(sys_dev);
+		WARN_ON(err);
 		break;
 	case CPU_DEAD:
 		thermal_throttle_remove_dev(sys_dev);
@@ -157,6 +157,7 @@ static struct notifier_block thermal_throttle_cpu_notifier =
 static __init int thermal_throttle_init_device(void)
 {
 	unsigned int cpu = 0;
+	int err;
 
 	if (!atomic_read(&therm_throt_en))
 		return 0;
@@ -167,8 +168,10 @@ static __init int thermal_throttle_init_device(void)
 	mutex_lock(&therm_cpu_lock);
 #endif
 	/* connect live CPUs to sysfs */
-	for_each_online_cpu(cpu)
-		thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+	for_each_online_cpu(cpu) {
+		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+		WARN_ON(err);
+	}
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_unlock(&therm_cpu_lock);
 #endif
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index be9d883c62c..ca31f18d277 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -317,7 +317,7 @@ is386:	movl $2,%ecx		# set MP
 	movl %eax,%gs
 	lldt %ax
 	cld			# gcc2 wants the direction flag cleared at all times
-	pushl %eax		# fake return address
+	pushl $0		# fake return address for unwinder
 #ifdef CONFIG_SMP
 	movb ready, %cl
 	movb $1, ready
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 477b24daff5..9a0060b92e3 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -109,7 +109,7 @@ static struct clocksource clocksource_pit = {
 
 static int __init init_pit_clocksource(void)
 {
-	if (num_possible_cpus() > 4) /* PIT does not scale! */
+	if (num_possible_cpus() > 1) /* PIT does not scale! */
 		return 0;
 
 	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index d07ed31f11e..62996cd1708 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -113,7 +113,8 @@ void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
-	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+				      "XT");
 	enable_irq(irq);
 }
 
@@ -335,13 +336,13 @@ void init_8259A(int auto_eoi)
  */
 
-static irqreturn_t math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
+static irqreturn_t math_error_irq(int cpl, void *dev_id)
 {
 	extern void math_error(void __user *);
 	outb(0,0xF0);
 	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
 		return IRQ_NONE;
-	math_error((void __user *)regs->eip);
+	math_error((void __user *)get_irq_regs()->eip);
 	return IRQ_HANDLED;
 }
 
@@ -369,8 +370,8 @@ void __init init_ISA_irqs (void)
 			/*
 			 * 16 old-style INTA-cycle interrupts:
 			 */
-			set_irq_chip_and_handler(i, &i8259A_chip,
-						 handle_level_irq);
+			set_irq_chip_and_handler_name(i, &i8259A_chip,
+						      handle_level_irq, "XT");
 		} else {
 			/*
 			 * 'high' PCI IRQs filled in on demand
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index b7287fb499f..3b7a63e0ed1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -91,6 +91,46 @@ static struct irq_pin_list {
 	int apic, pin, next;
 } irq_2_pin[PIN_MAP_SIZE];
 
+struct io_apic {
+	unsigned int index;
+	unsigned int unused[3];
+	unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+		+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	struct io_apic __iomem *io_apic = io_apic_base(apic);
+	writel(reg, &io_apic->index);
+	writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ *
+ * Older SiS APIC requires we rewrite the index register
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+	volatile struct io_apic *io_apic = io_apic_base(apic);
+	if (sis_apic_bug)
+		writel(reg, &io_apic->index);
+	writel(value, &io_apic->data);
+}
+
 union entry_union {
 	struct { u32 w1, w2; };
 	struct IO_APIC_route_entry entry;
@@ -107,12 +147,34 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 	return eu.entry;
 }
 
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
 	union entry_union eu;
 	eu.entry = e;
 	spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+	unsigned long flags;
+	union entry_union eu = { .entry.mask = 1 };
+
+	spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -234,9 +296,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 	/*
 	 * Disable it in the IO-APIC irq-routing table:
 	 */
-	memset(&entry, 0, sizeof(entry));
-	entry.mask = 1;
-	ioapic_write_entry(apic, pin, entry);
+	ioapic_mask_entry(apic, pin);
 }
 
 static void clear_IO_APIC (void)
@@ -1184,8 +1244,8 @@ static int __assign_irq_vector(int irq)
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
 
-	if (IO_APIC_VECTOR(irq) > 0)
-		return IO_APIC_VECTOR(irq);
+	if (irq_vector[irq] > 0)
+		return irq_vector[irq];
 
 	current_vector += 8;
 	if (current_vector == SYSCALL_VECTOR)
@@ -1199,7 +1259,7 @@ static int __assign_irq_vector(int irq)
 	}
 
 	vector = current_vector;
-	IO_APIC_VECTOR(irq) = vector;
+	irq_vector[irq] = vector;
 
 	return vector;
 }
@@ -1225,11 +1285,13 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
 	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
 			trigger == IOAPIC_LEVEL)
-		set_irq_chip_and_handler(irq, &ioapic_chip,
-					 handle_fasteoi_irq);
-	else
-		set_irq_chip_and_handler(irq, &ioapic_chip,
-					 handle_edge_irq);
+		set_irq_chip_and_handler_name(irq, &ioapic_chip,
+					      handle_fasteoi_irq, "fasteoi");
+	else {
+		irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
+		set_irq_chip_and_handler_name(irq, &ioapic_chip,
+					      handle_edge_irq, "edge");
+	}
 	set_intr_gate(vector, interrupt[irq]);
 }
@@ -1967,7 +2029,7 @@ static void ack_ioapic_quirk_irq(unsigned int irq)
 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 */
-	i = IO_APIC_VECTOR(irq);
+	i = irq_vector[irq];
 
 	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
 
@@ -1984,7 +2046,7 @@ static void ack_ioapic_quirk_irq(unsigned int irq)
 
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-	send_IPI_self(IO_APIC_VECTOR(irq));
+	send_IPI_self(irq_vector[irq]);
 
 	return 1;
 }
@@ -2020,7 +2082,7 @@ static inline void init_IO_APIC_traps(void)
 	 */
 	for (irq = 0; irq < NR_IRQS ; irq++) {
 		int tmp = irq;
-		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
+		if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
 			/*
 			 * Hmm.. We don't have an entry for this,
 			 * so default to an old-fashioned 8259
@@ -2235,7 +2297,8 @@ static inline void check_timer(void)
 	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
 	disable_8259A_irq(0);
-	set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
+	set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
+				      "fasteio");
 	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
@@ -2541,7 +2604,8 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
 
 	write_msi_msg(irq, &msg);
 
-	set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+				      "edge");
 
 	return 0;
 }
@@ -2562,18 +2626,16 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 static void target_ht_irq(unsigned int irq, unsigned int dest)
 {
-	u32 low, high;
-	low = read_ht_irq_low(irq);
-	high = read_ht_irq_high(irq);
+	struct ht_irq_msg msg;
+	fetch_ht_irq_msg(irq, &msg);
 
-	low &= ~(HT_IRQ_LOW_DEST_ID_MASK);
-	high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+	msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
+	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
 
-	low |= HT_IRQ_LOW_DEST_ID(dest);
-	high |= HT_IRQ_HIGH_DEST_ID(dest);
+	msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
+	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
 
-	write_ht_irq_low(irq, low);
-	write_ht_irq_high(irq, high);
+	write_ht_irq_msg(irq, &msg);
 }
 
 static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
@@ -2594,7 +2656,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 #endif
 
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
 	.name		= "PCI-HT",
 	.mask		= mask_ht_irq,
 	.unmask		= unmask_ht_irq,
@@ -2611,7 +2673,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 
 	vector = assign_irq_vector(irq);
 	if (vector >= 0) {
-		u32 low, high;
+		struct ht_irq_msg msg;
 		unsigned dest;
 		cpumask_t tmp;
 
@@ -2619,9 +2681,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 		cpu_set(vector >> 8, tmp);
 		dest = cpu_mask_to_apicid(tmp);
 
-		high = HT_IRQ_HIGH_DEST_ID(dest);
+		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
-		low =	HT_IRQ_LOW_BASE |
+		msg.address_lo =
+			HT_IRQ_LOW_BASE |
 			HT_IRQ_LOW_DEST_ID(dest) |
 			HT_IRQ_LOW_VECTOR(vector) |
 			((INT_DEST_MODE == 0) ?
@@ -2633,10 +2696,10 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 			HT_IRQ_LOW_MT_ARBITRATED) |
 			HT_IRQ_LOW_IRQ_MASKED;
 
-		write_ht_irq_low(irq, low);
-		write_ht_irq_high(irq, high);
+		write_ht_irq_msg(irq, &msg);
 
-		set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+					      handle_edge_irq, "edge");
 	}
 	return vector;
 }
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 3dd2e180151..3201d421090 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,6 +53,7 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
  */
 fastcall unsigned int do_IRQ(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs;
 	/* high bit used in ret_from_ code */
 	int irq = ~regs->orig_eax;
 	struct irq_desc *desc = irq_desc + irq;
@@ -67,6 +68,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 		BUG();
 	}
 
+	old_regs = set_irq_regs(regs);
 	irq_enter();
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* Debugging check for stack overflow: is there less than 1KB free? */
@@ -95,7 +97,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 		 * current stack (which is the irq stack already after all)
 		 */
 		if (curctx != irqctx) {
-			int arg1, arg2, arg3, ebx;
+			int arg1, arg2, ebx;
 
 			/* build the stack frame on the IRQ stack */
 			isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
@@ -114,17 +116,17 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
 			"       xchgl  %%ebx,%%esp      \n"
 			"       call   *%%edi           \n"
 			"       movl   %%ebx,%%esp      \n"
-			: "=a" (arg1), "=d" (arg2), "=c" (arg3), "=b" (ebx)
-			:  "0" (irq),   "1" (desc),  "2" (regs),  "3" (isp),
+			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
+			:  "0" (irq),   "1" (desc),  "2" (isp),
 			   "D" (desc->handle_irq)
 			: "memory", "cc"
 		);
 	} else
 #endif
-		desc->handle_irq(irq, desc, regs);
+		desc->handle_irq(irq, desc);
 
 	irq_exit();
-
+	set_irq_regs(old_regs);
 	return 1;
 }
 
@@ -256,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+		seq_printf(p, "-%-8s", irq_desc[i].name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index d98e44b16fe..fc79e1e859c 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -361,8 +361,11 @@ no_kprobe:
 	asm volatile ( ".global kretprobe_trampoline\n"
 			"kretprobe_trampoline: \n"
 			"	pushf\n"
-			/* skip cs, eip, orig_eax, es, ds */
-			"	subl $20, %esp\n"
+			/* skip cs, eip, orig_eax */
+			"	subl $12, %esp\n"
+			"	pushl %gs\n"
+			"	pushl %ds\n"
+			"	pushl %es\n"
 			"	pushl %eax\n"
 			"	pushl %ebp\n"
 			"	pushl %edi\n"
@@ -373,10 +376,10 @@ no_kprobe:
 			"	movl %esp, %eax\n"
 			"	call trampoline_handler\n"
 			/* move eflags to cs */
-			"	movl 48(%esp), %edx\n"
-			"	movl %edx, 44(%esp)\n"
+			"	movl 52(%esp), %edx\n"
+			"	movl %edx, 48(%esp)\n"
 			/* save true return address on eflags */
-			"	movl %eax, 48(%esp)\n"
+			"	movl %eax, 52(%esp)\n"
 			"	popl %ebx\n"
 			"	popl %ecx\n"
 			"	popl %edx\n"
@@ -384,8 +387,8 @@ no_kprobe:
 			"	popl %edi\n"
 			"	popl %ebp\n"
 			"	popl %eax\n"
-			/* skip eip, orig_eax, es, ds */
-			"	addl $16, %esp\n"
+			/* skip eip, orig_eax, es, ds, gs */
+			"	addl $20, %esp\n"
 			"	popf\n"
 			"	ret\n");
 }
@@ -404,6 +407,10 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 	INIT_HLIST_HEAD(&empty_rp);
 	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
+	/* fixup registers */
+	regs->xcs = __KERNEL_CS;
+	regs->eip = trampoline_address;
+	regs->orig_eax = 0xffffffff;
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -425,6 +432,7 @@ fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
 
 		if (ri->rp && ri->rp->handler){
 			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
 			__get_cpu_var(current_kprobe) = NULL;
 		}
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 9b9479768d5..23f5984d065 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -577,7 +577,7 @@ static void microcode_init_cpu(int cpu)
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
 	mutex_lock(&microcode_mutex);
 	collect_cpu_info(cpu);
-	if (uci->valid)
+	if (uci->valid && system_state == SYSTEM_RUNNING)
 		cpu_request_microcode(cpu);
 	mutex_unlock(&microcode_mutex);
 	set_cpus_allowed(current, old);
@@ -656,14 +656,18 @@ static struct attribute_group mc_attr_group = {
 
 static int mc_sysdev_add(struct sys_device *sys_dev)
 {
-	int cpu = sys_dev->id;
+	int err, cpu = sys_dev->id;
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
 	if (!cpu_online(cpu))
 		return 0;
+
 	pr_debug("Microcode:CPU %d added\n", cpu);
 	memset(uci, 0, sizeof(*uci));
-	sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+
+	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+	if (err)
+		return err;
 
 	microcode_init_cpu(cpu);
 	return 0;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 3e8e3adb048..eaafe233a5d 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -219,11 +219,11 @@ static int __init check_nmi_watchdog(void)
 	int cpu;
 
 	/* Enable NMI watchdog for newer systems.
-	   Actually it should be safe for most systems before 2004 too except
-	   for some IBM systems that corrupt registers when NMI happens
-	   during SMM. Unfortunately we don't have more exact information
-	   on these and use this coarse check. */
-	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004)
+	   Probably safe on most older systems too, but let's be careful.
+	   IBM ThinkPads use INT10 inside SMM and that allows early NMI inside SMM
+	   which hangs the system. Disable watchdog for all thinkpads */
+	if (nmi_watchdog == NMI_DEFAULT && dmi_get_year(DMI_BIOS_DATE) >= 2004 &&
+		!dmi_name_in_vendors("ThinkPad"))
 		nmi_watchdog = NMI_LOCAL_APIC;
 
 	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index dad02a960e0..dd53c58f64f 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -205,7 +205,7 @@ void cpu_idle(void)
 void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
-	cpumask_t map;
+	cpumask_t map, tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -227,6 +227,8 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+
+	set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -236,20 +238,28 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
+*
+* New with Core Duo processors, MWAIT can take some hints based on CPU
+* capability.
 */
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-	local_irq_enable();
-
-	while (!need_resched()) {
+	if (!need_resched()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
-		if (need_resched())
-			break;
-		__mwait(0, 0);
+		if (!need_resched())
+			__mwait(eax, ecx);
 	}
 }
 
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched())
+		mwait_idle_with_hints(0, 0);
+}
+
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_MWAIT)) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 000cf03751f..141041dde74 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -846,7 +846,7 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
 static int __init
 efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
 {
-	memory_present(0, start, end);
+	memory_present(0, PFN_UP(start), PFN_DOWN(end));
 	return 0;
 }
 
@@ -1083,16 +1083,15 @@ static unsigned long __init setup_memory(void)
 
 void __init zone_sizes_init(void)
 {
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] =
+		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-			virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
-			max_low_pfn,
-			highend_pfn};
+	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
 	add_active_range(0, 0, highend_pfn);
 #else
-	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-			virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
-			max_low_pfn};
 	add_active_range(0, 0, max_low_pfn);
 #endif
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 1b080ab8a49..31e5c6573aa 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -321,6 +321,7 @@ static inline void leave_mm (unsigned long cpu)
 
 fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long cpu;
 
 	cpu = get_cpu();
@@ -351,6 +352,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
+	set_irq_regs(old_regs);
 }
 
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@@ -605,11 +607,14 @@ void smp_send_stop(void)
  */
 fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	ack_APIC_irq();
+	set_irq_regs(old_regs);
 }
 
 fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 {
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	void (*func) (void *info) = call_data->func;
 	void *info = call_data->info;
 	int wait = call_data->wait;
@@ -632,6 +637,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 		mb();
 		atomic_inc(&call_data->finished);
 	}
+	set_irq_regs(old_regs);
 }
 
 /*
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 7e639f78b0b..2697e9210e9 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -318,3 +318,4 @@ ENTRY(sys_call_table)
 	.long sys_vmsplice
 	.long sys_move_pages
 	.long sys_getcpu
+	.long sys_epoll_pwait
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 58a2d558241..78af572fd17 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(profile_pc);
  * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	/*
	 * Here we are in the timer irq handler. We just have irqs locally
@@ -188,7 +188,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	}
 #endif
 
-	do_timer_interrupt_hook(regs);
+	do_timer_interrupt_hook();
 
 	if (MCA_bus) {
@@ -201,15 +201,15 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
		   high bit of the PPI port B (0x61).  Note that some PS/2s,
		   notably the 55SX, work fine if this is removed.  */
 
-		irq = inb_p( 0x61 );	/* read the current state */
-		outb_p( irq|0x80, 0x61 );	/* reset the IRQ */
+		u8 irq_v = inb_p( 0x61 );	/* read the current state */
+		outb_p( irq_v|0x80, 0x61 );	/* reset the IRQ */
 	}
 
 	write_sequnlock(&xtime_lock);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	if (using_apic_timer)
-		smp_send_timer_broadcast_ipi(regs);
+		smp_send_timer_broadcast_ipi();
 #endif
 
 	return IRQ_HANDLED;
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 6bf14a4e995..1a2a979cf6a 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -441,7 +441,7 @@ int hpet_rtc_dropped_irq(void)
 	return 1;
 }
 
-irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
 {
 	struct rtc_time curr_time;
 	unsigned long rtc_int_flag = 0;
@@ -480,7 +480,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	}
 	if (call_rtc_interrupt) {
 		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
-		rtc_interrupt(rtc_int_flag, dev_id, regs);
+		rtc_interrupt(rtc_int_flag, dev_id);
 	}
 	return IRQ_HANDLED;
 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00489b706d2..fe9c5e8e7e6 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -129,15 +129,19 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
 
 #ifdef CONFIG_FRAME_POINTER
 	while (valid_stack_ptr(tinfo, (void *)ebp)) {
+		unsigned long new_ebp;
 		addr = *(unsigned long *)(ebp + 4);
 		ops->address(data, addr);
 		/*
 		 * break out of recursive entries (such as
-		 * end_of_stack_stop_unwind_function):
+		 * end_of_stack_stop_unwind_function). Also,
+		 * we can never allow a frame pointer to
+		 * move downwards!
 		 */
-		if (ebp == *(unsigned long *)ebp)
+		new_ebp = *(unsigned long *)ebp;
+		if (new_ebp <= ebp)
 			break;
-		ebp = *(unsigned long *)ebp;
+		ebp = new_ebp;
 	}
 #else
 	while (valid_stack_ptr(tinfo, stack)) {
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index b8fa0a8b2e4..fbc95828cd7 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -349,8 +349,8 @@ static int tsc_update_callback(void)
 	int change = 0;
 
 	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
-		clocksource_tsc.rating = 50;
+	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
+		clocksource_tsc.rating = 0;
 		clocksource_reselect();
 		change = 1;
 	}
@@ -461,7 +461,7 @@ static int __init init_tsc_clocksource(void)
 					clocksource_tsc.shift);
 		/* lower the rating if we already know its unstable: */
 		if (check_tsc_unstable())
-			clocksource_tsc.rating = 50;
+			clocksource_tsc.rating = 0;
 
 		init_timer(&verify_tsc_freq_timer);
 		verify_tsc_freq_timer.function = verify_tsc_freq;
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 8355d8d87d1..cbcd61d6120 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -714,7 +714,7 @@ static int irqbits;
 	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
 	| (1 << SIGUNUSED) )
 
-static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
+static irqreturn_t irq_handler(int intno, void *dev_id)
 {
 	int irq_bit;
 	unsigned long flags;
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 1e7ac1c44dd..c6f84a0322b 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -51,6 +51,7 @@ SECTIONS
   __tracedata_end = .;
 
   /* writeable */
+  . = ALIGN(4096);
   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
 	*(.data)
 	CONSTRUCTORS
@@ -126,13 +127,7 @@ SECTIONS
   __setup_end = .;
   __initcall_start = .;
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-	*(.initcall1.init)
-	*(.initcall2.init)
-	*(.initcall3.init)
-	*(.initcall4.init)
-	*(.initcall5.init)
-	*(.initcall6.init)
-	*(.initcall7.init)
+	INITCALLS
   }
   __initcall_end = .;
   __con_initcall_start = .;