-rw-r--r--  Documentation/crypto/async-tx-api.txt | 219
-rw-r--r--  Makefile                              |   2
-rw-r--r--  arch/arm/mach-ep93xx/core.c           |   2
-rw-r--r--  arch/arm/mm/cache-l2x0.c              |  12
-rw-r--r--  arch/mips/kernel/i8259.c              |   5
-rw-r--r--  arch/mips/kernel/irq-msc01.c          |  10
-rw-r--r--  arch/mips/kernel/irq.c                |  10
-rw-r--r--  arch/mips/kernel/smtc.c               |   5
-rw-r--r--  arch/powerpc/kernel/process.c         |   7
-rw-r--r--  crypto/async_tx/async_tx.c            |  12
-rw-r--r--  drivers/acpi/processor_core.c         |   2
-rw-r--r--  drivers/acpi/processor_idle.c         |  19
-rw-r--r--  drivers/acpi/sleep/Makefile           |   2
-rw-r--r--  drivers/acpi/sleep/main.c             |  57
-rw-r--r--  drivers/acpi/sleep/poweroff.c         |  75
-rw-r--r--  drivers/acpi/video.c                  |   3
-rw-r--r--  drivers/char/mspec.c                  |  26
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c       |  62
-rw-r--r--  drivers/input/mouse/appletouch.c      |   6
-rw-r--r--  drivers/kvm/Kconfig                   |   3
-rw-r--r--  drivers/lguest/lguest_asm.S           |   6
-rw-r--r--  drivers/md/raid5.c                    |  17
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c         |   2
-rw-r--r--  drivers/net/r8169.c                   |  14
-rw-r--r--  drivers/net/sky2.c                    |  37
-rw-r--r--  drivers/net/sky2.h                    |   2
-rw-r--r--  drivers/power/power_supply_sysfs.c    |   1
-rw-r--r--  drivers/w1/w1.c                       |   1
-rw-r--r--  fs/ufs/super.c                        |   4
-rw-r--r--  fs/xfs/xfs_filestream.c               |   7
-rw-r--r--  fs/xfs/xfs_log_recover.c              |   4
-rw-r--r--  include/acpi/processor.h              |   2
-rw-r--r--  include/asm-mips/irq.h                |  32
-rw-r--r--  kernel/time/tick-broadcast.c          |  17
-rw-r--r--  lib/Kconfig.debug                     |   2
-rw-r--r--  sound/core/memalloc.c                 |  68
36 files changed, 518 insertions, 237 deletions
diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
new file mode 100644
index 00000000000..c1e9545c59b
--- /dev/null
+++ b/Documentation/crypto/async-tx-api.txt
@@ -0,0 +1,219 @@
+ Asynchronous Transfers/Transforms API
+
+1 INTRODUCTION
+
+2 GENEALOGY
+
+3 USAGE
+3.1 General format of the API
+3.2 Supported operations
+3.3 Descriptor management
+3.4 When does the operation execute?
+3.5 When does the operation complete?
+3.6 Constraints
+3.7 Example
+
+4 DRIVER DEVELOPMENT NOTES
+4.1 Conformance points
+4.2 "My application needs finer control of hardware channels"
+
+5 SOURCE
+
+---
+
+1 INTRODUCTION
+
+The async_tx API provides methods for describing a chain of asynchronous
+bulk memory transfers/transforms with support for inter-transactional
+dependencies. It is implemented as a dmaengine client that smooths over
+the details of different hardware offload engine implementations. Code
+that is written to the API can optimize for asynchronous operation and
+the API will fit the chain of operations to the available offload
+resources.
+
+2 GENEALOGY
+
+The API was initially designed to offload the memory copy and
+xor-parity-calculations of the md-raid5 driver using the offload engines
+present in the Intel(R) Xscale series of I/O processors. It also built
+on the 'dmaengine' layer developed for offloading memory copies in the
+network stack using Intel(R) I/OAT engines. The following design
+features surfaced as a result:
+1/ implicit synchronous path: users of the API do not need to know if
+ the platform they are running on has offload capabilities. The
+ operation will be offloaded when an engine is available and carried out
+ in software otherwise.
+2/ cross channel dependency chains: the API allows a chain of dependent
+ operations to be submitted, like xor->copy->xor in the raid5 case. The
+ API automatically handles cases where the transition from one operation
+ to another implies a hardware channel switch.
+3/ dmaengine extensions to support multiple clients and operation types
+ beyond 'memcpy'
+
+3 USAGE
+
+3.1 General format of the API:
+struct dma_async_tx_descriptor *
+async_<operation>(<op specific parameters>,
+ enum async_tx_flags flags,
+ struct dma_async_tx_descriptor *dependency,
+ dma_async_tx_callback callback_routine,
+ void *callback_parameter);
+
+3.2 Supported operations:
+memcpy - memory copy between a source and a destination buffer
+memset - fill a destination buffer with a byte value
+xor - xor a series of source buffers and write the result to a
+ destination buffer
+xor_zero_sum - xor a series of source buffers and set a flag if the
+ result is zero. The implementation attempts to prevent
+ writes to memory
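+
+A minimal sketch using the memcpy operation to illustrate the calling
+convention from 3.1 (the 'dest', 'src', and 'len' variables are assumed
+to be set up by the caller):
+
+struct dma_async_tx_descriptor *tx;
+
+/* copy len bytes from offset 0 of src to offset 0 of dest, with no
+ * dependency and no completion callback; ASYNC_TX_ACK marks the
+ * descriptor as immediately recyclable (see 3.3)
+ */
+tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
+                  NULL, NULL, NULL);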
+
+3.3 Descriptor management:
+The return value is non-NULL and points to a 'descriptor' when the operation
+has been queued to execute asynchronously. Descriptors are recycled
+resources, under control of the offload engine driver, to be reused as
+operations complete. When an application needs to submit a chain of
+operations it must guarantee that the descriptor is not automatically recycled
+before the dependency is submitted. This requires that all descriptors be
+acknowledged by the application before the offload engine driver is allowed to
+recycle (or free) the descriptor. A descriptor can be acked by one of the
+following methods:
+1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
+2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
+ descriptor of a new operation.
+3/ calling async_tx_ack() on the descriptor.
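+
+A sketch of method 3/, assuming 'tx' was returned by an earlier
+async_<operation> call and no dependent operation will follow:
+
+/* allow the offload engine driver to recycle tx once it completes */
+async_tx_ack(tx);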
+
+3.4 When does the operation execute?
+Operations do not immediately issue after return from the
+async_<operation> call. Offload engine drivers batch operations to
+improve performance by reducing the number of mmio cycles needed to
+manage the channel. Once a driver-specific threshold is met the driver
+automatically issues pending operations. An application can force this
+event by calling async_tx_issue_pending_all(). This operates on all
+channels, since the application has no knowledge of the
+channel-to-operation mapping.
+
+3.5 When does the operation complete?
+There are two methods for an application to learn about the completion
+of an operation.
+1/ Call dma_wait_for_async_tx(). This call causes the CPU to spin while
+ it polls for the completion of the operation. It handles dependency
+ chains and issuing pending operations.
+2/ Specify a completion callback. The callback routine runs in tasklet
+ context if the offload engine driver supports interrupts, or it is
+ called in application context if the operation is carried out
+ synchronously in software. The callback can be set in the call to
+ async_<operation>, or when the application needs to submit a chain of
+ unknown length it can use the async_trigger_callback() routine to set a
+ completion interrupt/callback at the end of the chain.
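+
+A sketch combining 3.4 and 3.5: flush the driver-batched descriptors,
+then spin until the chain headed by 'tx' completes:
+
+/* force all channels to issue their pending operations */
+async_tx_issue_pending_all();
+
+/* poll for completion; dependency chains are handled internally */
+dma_wait_for_async_tx(tx);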
+
+3.6 Constraints:
+1/ Calls to async_<operation> are not permitted in IRQ context. Other
+ contexts are permitted provided constraint #2 is not violated.
+2/ Completion callback routines cannot submit new operations. Doing so
+   would result in recursion in the synchronous case and in spin_locks
+   being acquired twice in the asynchronous case.
+
+3.7 Example:
+Perform a xor->copy->xor operation where each operation depends on the
+result from the previous operation:
+
+void complete_xor_copy_xor(void *param)
+{
+ printk("complete\n");
+}
+
+void run_xor_copy_xor(struct page **xor_srcs,
+                      int xor_src_cnt,
+                      struct page *xor_dest,
+                      size_t xor_len,
+                      struct page *copy_src,
+                      struct page *copy_dest,
+                      size_t copy_len)
+{
+ struct dma_async_tx_descriptor *tx;
+
+ tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+ ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
+ tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
+ ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+ tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+ ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
+ tx, complete_xor_copy_xor, NULL);
+
+ async_tx_issue_pending_all();
+}
+
+See include/linux/async_tx.h for more information on the flags. See the
+ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
+implementation examples.
+
+4 DRIVER DEVELOPMENT NOTES
+4.1 Conformance points:
+There are a few conformance points required in dmaengine drivers to
+accommodate assumptions made by applications using the async_tx API:
+1/ Completion callbacks are expected to happen in tasklet context
+2/ dma_async_tx_descriptor fields are never manipulated in IRQ context
+3/ Use async_tx_run_dependencies() in the descriptor clean up path to
+ handle submission of dependent operations
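+
+A sketch of conformance point 3/ in a descriptor clean up path; the
+'my_chan'/'my_desc' structures and their list handling are hypothetical
+driver internals, and only the async_tx_run_dependencies() call is part
+of the API:
+
+static void my_chan_cleanup(struct my_chan *chan)
+{
+	struct my_desc *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, &chan->complete_list, node) {
+		dma_async_tx_callback callback = desc->txd.callback;
+		void *param = desc->txd.callback_param;
+
+		/* submit operations that depend on this descriptor */
+		async_tx_run_dependencies(&desc->txd);
+
+		/* per point 1/, this runs in tasklet context */
+		if (callback)
+			callback(param);
+	}
+}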
+
+4.2 "My application needs finer control of hardware channels"
+This requirement seems to arise from cases where a DMA engine driver is
+trying to support device-to-memory DMA. The dmaengine and async_tx
+implementations were designed for offloading memory-to-memory
+operations; however, there are some capabilities of the dmaengine layer
+that can be used for platform-specific channel management.
+Platform-specific constraints can be handled by registering the
+application as a 'dma_client' and implementing a 'dma_event_callback' to
+apply a filter to the available channels in the system. Before showing
+how to implement a custom dma_event_callback, some background on
+dmaengine's client support is required.
+
+The following routines in dmaengine support multiple clients requesting
+use of a channel:
+- dma_async_client_register(struct dma_client *client)
+- dma_async_client_chan_request(struct dma_client *client)
+
+dma_async_client_register takes a pointer to an initialized dma_client
+structure. It expects that the 'event_callback' and 'cap_mask' fields
+are already initialized.
+
+dma_async_client_chan_request triggers dmaengine to notify the client of
+all channels that satisfy the capability mask. It is up to the client's
+event_callback routine to track how many channels the client needs and
+how many it is currently using. The dma_event_callback routine returns a
+dma_state_client code to let dmaengine know the status of the
+allocation.
+
+Below is an example of how to extend this functionality for
+platform-specific filtering of the available channels beyond the
+standard capability mask:
+
+static enum dma_state_client
+my_dma_client_callback(struct dma_client *client,
+ struct dma_chan *chan, enum dma_state state)
+{
+ struct dma_device *dma_dev;
+ struct my_platform_specific_dma *plat_dma_dev;
+
+ dma_dev = chan->device;
+ plat_dma_dev = container_of(dma_dev,
+ struct my_platform_specific_dma,
+ dma_dev);
+
+ if (!plat_dma_dev->platform_specific_capability)
+ return DMA_DUP;
+
+ . . .
+}
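+
+To complete the picture, a sketch of how such a client could be wired
+up, assuming it wants memcpy-capable channels (dma_cap_zero(),
+dma_cap_set(), and DMA_MEMCPY are from include/linux/dmaengine.h):
+
+static struct dma_client my_client = {
+	.event_callback = my_dma_client_callback,
+};
+
+static int __init my_client_init(void)
+{
+	dma_cap_zero(my_client.cap_mask);
+	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
+	dma_async_client_register(&my_client);
+	dma_async_client_chan_request(&my_client);
+	return 0;
+}
+module_init(my_client_init);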
+
+5 SOURCE
+include/linux/dmaengine.h: core header file for DMA drivers and clients
+drivers/dma/dmaengine.c: offload engine channel management routines
+drivers/dma/: location for offload engine drivers
+include/linux/async_tx.h: core header file for the async_tx api
+crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
+crypto/async_tx/async_memcpy.c: copy offload
+crypto/async_tx/async_memset.c: memory fill offload
+crypto/async_tx/async_xor.c: xor and xor zero sum offload
diff --git a/Makefile b/Makefile
index c265e41ec55..4dac25301d5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 23
-EXTRAVERSION =-rc7
+EXTRAVERSION =-rc8
NAME = Arr Matey! A Hairy Bilge Rat!
# *DOCUMENTATION*
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 851cc7158ca..70b2c780111 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -336,7 +336,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
if (line >= 0 && line < 16) {
gpio_line_config(line, GPIO_IN);
} else {
- gpio_line_config(EP93XX_GPIO_LINE_F(line), GPIO_IN);
+ gpio_line_config(EP93XX_GPIO_LINE_F(line-16), GPIO_IN);
}
port = line >> 3;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b4e9b734e0b..76b800a9519 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -57,7 +57,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
{
unsigned long addr;
- start &= ~(CACHE_LINE_SIZE - 1);
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ end &= ~(CACHE_LINE_SIZE - 1);
+ sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+ }
+
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
sync_writel(addr, L2X0_INV_LINE_PA, 1);
cache_sync();
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b6c30800c66..3a2d255361b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -177,10 +177,7 @@ handle_real_irq:
outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
spin_unlock_irqrestore(&i8259A_lock, flags);
return;
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 410868b5ea5..1ecdd50bfc6 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
mask_msc_irq(irq);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
/* This actually needs to be a call into platform code */
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
}
/*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+ smtc_im_ack_irq(irq);
}
/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index aeded6c17de..a990aad2f04 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
*/
void ack_bad_irq(unsigned int irq)
{
+ smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
/*
* Generic, controller-independent functions:
*/
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 43826c16101..f09404377ef 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -25,8 +25,11 @@
#include <asm/smtc_proc.h>
/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
*/
+unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
local_irq_save(flags); \
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e477c9d0498..8a1b001d0b1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -605,6 +605,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->ccr = 0;
regs->gpr[1] = sp;
+ /*
+ * We have just cleared all the nonvolatile GPRs, so make
+ * FULL_REGS(regs) return true. This is necessary to allow
+ * ptrace to examine the thread immediately after exec.
+ */
+ regs->trap &= ~1UL;
+
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 035007145e7..bc18cbb8ea7 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
enum dma_status status;
struct dma_async_tx_descriptor *iter;
+ struct dma_async_tx_descriptor *parent;
if (!tx)
return DMA_SUCCESS;
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
/* poll through the dependency chain, return when tx is complete */
do {
iter = tx;
- while (iter->cookie == -EBUSY)
- iter = iter->parent;
+
+ /* find the root of the unsubmitted dependency chain */
+ while (iter->cookie == -EBUSY) {
+ parent = iter->parent;
+ if (parent && parent->cookie == -EBUSY)
+ iter = iter->parent;
+ else
+ break;
+ }
status = dma_sync_wait(iter->chan, iter->cookie);
} while (status == DMA_IN_PROGRESS || (iter != tx));
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 2afb3d2086b..9f11dc296cd 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -102,6 +102,8 @@ static struct acpi_driver acpi_processor_driver = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
.start = acpi_processor_start,
+ .suspend = acpi_processor_suspend,
+ .resume = acpi_processor_resume,
},
};
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d9b8af763e1..f18261368e7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -325,6 +325,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
#endif
+/*
+ * Suspend / resume control
+ */
+static int acpi_idle_suspend;
+
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+{
+ acpi_idle_suspend = 1;
+ return 0;
+}
+
+int acpi_processor_resume(struct acpi_device * device)
+{
+ acpi_idle_suspend = 0;
+ return 0;
+}
+
static void acpi_processor_idle(void)
{
struct acpi_processor *pr = NULL;
@@ -355,7 +372,7 @@ static void acpi_processor_idle(void)
}
cx = pr->power.state;
- if (!cx) {
+ if (!cx || acpi_idle_suspend) {
if (pm_idle_save)
pm_idle_save();
else
diff --git a/drivers/acpi/sleep/Makefile b/drivers/acpi/sleep/Makefile
index 195a4f69c0f..ba9bd403d44 100644
--- a/drivers/acpi/sleep/Makefile
+++ b/drivers/acpi/sleep/Makefile
@@ -1,4 +1,4 @@
-obj-y := poweroff.o wakeup.o
+obj-y := wakeup.o
obj-$(CONFIG_ACPI_SLEEP) += main.o
obj-$(CONFIG_ACPI_SLEEP) += proc.o
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index c52ade816fb..85633c585aa 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -15,6 +15,9 @@
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/suspend.h>
+
+#include <asm/io.h>
+
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include "sleep.h"
@@ -57,6 +60,27 @@ static int acpi_pm_set_target(suspend_state_t pm_state)
return error;
}
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+ /* do we have a wakeup address for S2 and S3? */
+ if (acpi_state == ACPI_STATE_S3) {
+ if (!acpi_wakeup_address) {
+ return -EFAULT;
+ }
+ acpi_set_firmware_waking_vector((acpi_physical_address)
+ virt_to_phys((void *)
+ acpi_wakeup_address));
+
+ }
+ ACPI_FLUSH_CPU_CACHE();
+ acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+ acpi_gpe_sleep_prepare(acpi_state);
+ acpi_enter_sleep_state_prep(acpi_state);
+ return 0;
+}
+
/**
* acpi_pm_prepare - Do preliminary suspend work.
* @pm_state: ignored
@@ -350,6 +374,20 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
return d_max;
}
+static void acpi_power_off_prepare(void)
+{
+ /* Prepare to power off the system */
+ acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
+{
+ /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+ printk("%s called\n", __FUNCTION__);
+ local_irq_disable();
+ acpi_enter_sleep_state(ACPI_STATE_S5);
+}
+
int __init acpi_sleep_init(void)
{
acpi_status status;
@@ -363,16 +401,17 @@ int __init acpi_sleep_init(void)
if (acpi_disabled)
return 0;
+ sleep_states[ACPI_STATE_S0] = 1;
+ printk(KERN_INFO PREFIX "(supports S0");
+
#ifdef CONFIG_SUSPEND
- printk(KERN_INFO PREFIX "(supports");
- for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) {
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
status = acpi_get_sleep_type_data(i, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
sleep_states[i] = 1;
printk(" S%d", i);
}
}
- printk(")\n");
pm_set_ops(&acpi_pm_ops);
#endif
@@ -382,10 +421,16 @@ int __init acpi_sleep_init(void)
if (ACPI_SUCCESS(status)) {
hibernation_set_ops(&acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1;
+ printk(" S4");
}
-#else
- sleep_states[ACPI_STATE_S4] = 0;
#endif
-
+ status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+ if (ACPI_SUCCESS(status)) {
+ sleep_states[ACPI_STATE_S5] = 1;
+ printk(" S5");
+ pm_power_off_prepare = acpi_power_off_prepare;
+ pm_power_off = acpi_power_off;
+ }
+ printk(")\n");
return 0;
}
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c
deleted file mode 100644
index 39e40d56b03..00000000000
--- a/drivers/acpi/sleep/poweroff.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * poweroff.c - ACPI handler for powering off the system.
- *
- * AKA S5, but it is independent of whether or not the kernel supports
- * any other sleep support in the system.
- *
- * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/pm.h>
-#include <linux/init.h>
-#include <acpi/acpi_bus.h>
-#include <linux/sysdev.h>
-#include <asm/io.h>
-#include "sleep.h"
-
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
- /* do we have a wakeup address for S2 and S3? */
- if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
- return -EFAULT;
- }
- acpi_set_firmware_waking_vector((acpi_physical_address)
- virt_to_phys((void *)
- acpi_wakeup_address));
-
- }
- ACPI_FLUSH_CPU_CACHE();
- acpi_enable_wakeup_device_prep(acpi_state);
-#endif
- acpi_gpe_sleep_prepare(acpi_state);
- acpi_enter_sleep_state_prep(acpi_state);
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static void acpi_power_off_prepare(void)
-{
- /* Prepare to power off the system */
- acpi_sleep_prepare(ACPI_STATE_S5);
-}
-
-static void acpi_power_off(void)
-{
- /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
- printk("%s called\n", __FUNCTION__);
- local_irq_disable();
- /* Some SMP machines only can poweroff in boot CPU */
- acpi_enter_sleep_state(ACPI_STATE_S5);
-}
-
-static int acpi_poweroff_init(void)
-{
- if (!acpi_disabled) {
- u8 type_a, type_b;
- acpi_status status;
-
- status =
- acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
- pm_power_off_prepare = acpi_power_off_prepare;
- pm_power_off = acpi_power_off;
- }
- }
- return 0;
-}
-
-late_initcall(acpi_poweroff_init);
-
-#endif /* CONFIG_PM */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 3c9bb85a6a9..d05891f1628 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -417,7 +417,6 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
arg0.integer.value = level;
status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
- printk(KERN_DEBUG "set_level status: %x\n", status);
return status;
}
@@ -1754,7 +1753,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
{
- return acpi_video_bus_DOS(video, 1, 0);
+ return acpi_video_bus_DOS(video, 0, 0);
}
static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 049a46cc9f8..04ac155d3a0 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
* mspec_close
*
* Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
*/
static void
mspec_close(struct vm_area_struct *vma)
{
struct vma_data *vdata;
- int index, last_index, result;
+ int index, last_index;
unsigned long my_page;
vdata = vma->vm_private_data;
- BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+ if (!atomic_dec_and_test(&vdata->refcnt))
+ return;
- spin_lock(&vdata->lock);
- index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
- last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
- for (; index < last_index; index++) {
+ last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+ for (index = 0; index < last_index; index++) {
if (vdata->maddr[index] == 0)
continue;
/*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
*/
my_page = vdata->maddr[index];
vdata->maddr[index] = 0;
- spin_unlock(&vdata->lock);
- result = mspec_zero_block(my_page, PAGE_SIZE);
- if (!result)
+ if (!mspec_zero_block(my_page, PAGE_SIZE))
uncached_free_page(my_page);
else
printk(KERN_WARNING "mspec_close(): "
- "failed to zero page %i\n",
- result);
- spin_lock(&vdata->lock);
+ "failed to zero page %ld\n", my_page);
}
- spin_unlock(&vdata->lock);
-
- if (!atomic_dec_and_test(&vdata->refcnt))
- return;
if (vdata->flags & VMD_VMALLOCED)
vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
kfree(vdata);
}
-
/*
* mspec_nopfn
*
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ba0428d872a..85c51bdc36f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
-static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
- struct ib_sge *sg)
+static void set_mlx_icrc_seg(void *dseg)
+{
+ u32 *t = dseg;
+ struct mlx4_wqe_inline_seg *iseg = dseg;
+
+ t[1] = 0;
+
+ /*
+ * Need a barrier here before writing the byte_count field to
+ * make sure that all the data is visible before the
+ * byte_count field is set. Otherwise, if the segment begins
+ * a new cacheline, the HCA prefetcher could grab the 64-byte
+	 * chunk and get a valid (!= 0xffffffff) byte count but
+ * stale data, and end up sending the wrong data.
+ */
+ wmb();
+
+ iseg->byte_count = cpu_to_be32((1 << 31) | 4);
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
- dseg->byte_count = cpu_to_be32(sg->length);
dseg->lkey = cpu_to_be32(sg->lkey);
dseg->addr = cpu_to_be64(sg->addr);
+
+ /*
+ * Need a barrier here before writing the byte_count field to
+ * make sure that all the data is visible before the
+ * byte_count field is set. Otherwise, if the segment begins
+ * a new cacheline, the HCA prefetcher could grab the 64-byte
+	 * chunk and get a valid (!= 0xffffffff) byte count but
+ * stale data, and end up sending the wrong data.
+ */
+ wmb();
+
+ dseg->byte_count = cpu_to_be32(sg->length);
}
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp = to_mqp(ibqp);
void *wqe;
struct mlx4_wqe_ctrl_seg *ctrl;
+ struct mlx4_wqe_data_seg *dseg;
unsigned long flags;
int nreq;
int err = 0;
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
}
- for (i = 0; i < wr->num_sge; ++i) {
- set_data_seg(wqe, wr->sg_list + i);
+ /*
+ * Write data segments in reverse order, so as to
+ * overwrite cacheline stamp last within each
+ * cacheline. This avoids issues with WQE
+ * prefetching.
+ */
- wqe += sizeof (struct mlx4_wqe_data_seg);
- size += sizeof (struct mlx4_wqe_data_seg) / 16;
- }
+ dseg = wqe;
+ dseg += wr->num_sge - 1;
+ size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
/* Add one more inline data segment for ICRC for MLX sends */
- if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
- ((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
- cpu_to_be32((1 << 31) | 4);
- ((u32 *) wqe)[1] = 0;
- wqe += sizeof (struct mlx4_wqe_data_seg);
+ if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)) {
+ set_mlx_icrc_seg(dseg + 1);
size += sizeof (struct mlx4_wqe_data_seg) / 16;
}
+ for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
+ set_data_seg(dseg, wr->sg_list + i);
+
ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
MLX4_WQE_CTRL_FENCE : 0) | size;
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 2bea1b2c631..a1804bfdbb8 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
{
int x, y, x_z, y_z, x_f, y_f;
int retval, i, j;
+ int key;
struct atp *dev = urb->context;
switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
ATP_XFACT, &x_z, &x_f);
y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
ATP_YFACT, &y_z, &y_f);
+ key = dev->data[dev->datalen - 1] & 1;
if (x && y) {
if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
the first touch unless reinitialised. Do so if it's been
idle for a while in order to avoid waking the kernel up
several hundred times a second */
- if (atp_is_geyser_3(dev)) {
+ if (!key && atp_is_geyser_3(dev)) {
dev->idlecount++;
if (dev->idlecount == 10) {
dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
}
}
- input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
+ input_report_key(dev->input, BTN_LEFT, key);
input_sync(dev->input);
exit:
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 7b64fd4aa2f..0a419a0de60 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -6,7 +6,8 @@ menuconfig VIRTUALIZATION
depends on X86
default y
---help---
- Say Y here to get to see options for virtualization guest drivers.
+ Say Y here to get to see options for using your Linux host to run other
+ operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and disabled.
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
index f182c6a3620..1ddcd5cd20f 100644
--- a/drivers/lguest/lguest_asm.S
+++ b/drivers/lguest/lguest_asm.S
@@ -22,8 +22,9 @@
jmp lguest_init
/*G:055 We create a macro which puts the assembler code between lgstart_ and
- * lgend_ markers. These templates end up in the .init.text section, so they
- * are discarded after boot. */
+ * lgend_ markers. These templates are put in the .text section: they can't be
+ * discarded after boot as we may need to patch modules, too. */
+.text
#define LGUEST_PATCH(name, insns...) \
lgstart_##name: insns; lgend_##name:; \
.globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
/*:*/
-.text
/* These demark the EIP range where host should never deliver interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4d63773ee73..f96dea975fa 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL;
raid5_conf_t *conf = sh->raid_conf;
- int i, more_to_read = 0;
+ int i;
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
/* clear completed biofills */
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- /* check if this stripe has new incoming reads */
- if (dev->toread)
- more_to_read++;
/* acknowledge completion of a biofill operation */
- /* and check if we need to reply to a read request
- */
- if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+ /* and check if we need to reply to a read request,
+ * new R5_Wantfill requests are held off until
+ * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+ */
+ if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi, *rbi2;
- clear_bit(R5_Wantfill, &dev->flags);
/* The access to dev->read is outside of the
* spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
return_io(return_bi);
- if (more_to_read)
- set_bit(STRIPE_HANDLE, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index c06cae3f0b5..503f2685fb7 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -116,7 +116,7 @@ struct el3_private {
spinlock_t lock;
};
-static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" };
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
/*====================================================================*/
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b85ab4a8f2a..c921ec32c23 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
return;
}
- /* phy config for RTL8169s mac_version C chip */
+ if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+ (tp->mac_version != RTL_GIGA_MAC_VER_03))
+ return;
+
mdio_write(ioaddr, 31, 0x0001); //w 31 2 0 1
mdio_write(ioaddr, 21, 0x1000); //w 21 15 0 1000
mdio_write(ioaddr, 24, 0x65c7); //w 24 15 0 65c7
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
(TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
+ /*
+ * 8168 hack: TxPoll requests are lost when the Tx packets are
+ * too close. Let's kick an extra TxPoll request when a burst
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+ smp_rmb();
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(TxPoll, NPQ);
}
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index eaffe551d1d..0792031a5cf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -338,6 +338,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
if (!(hw->flags & SKY2_HW_GIGABIT)) {
/* enable automatic crossover */
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+ if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+ hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+ u16 spec;
+
+ /* Enable Class A driver for FE+ A0 */
+ spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+ spec |= PHY_M_FESC_SEL_CL_A;
+ gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+ }
} else {
/* disable energy detect */
ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -816,7 +826,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
- if (!(hw->flags & SKY2_HW_RAMBUFFER)) {
+	/* On chips without ram buffer, pause is controlled at the MAC level */
+ if (sky2_read8(hw, B2_E_0) == 0) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
@@ -1271,7 +1282,7 @@ static int sky2_up(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask;
+ u32 imask, ramsize;
int cap, err = -ENOMEM;
struct net_device *otherdev = hw->dev[sky2->port^1];
@@ -1326,13 +1337,12 @@ static int sky2_up(struct net_device *dev)
sky2_mac_init(hw, port);
- if (hw->flags & SKY2_HW_RAMBUFFER) {
- /* Register is number of 4K blocks on internal RAM buffer. */
- u32 ramsize = sky2_read8(hw, B2_E_0) * 4;
+ /* Register is number of 4K blocks on internal RAM buffer. */
+ ramsize = sky2_read8(hw, B2_E_0) * 4;
+ if (ramsize > 0) {
u32 rxspace;
- printk(KERN_DEBUG PFX "%s: ram buffer %dK\n", dev->name, ramsize);
-
+ pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1995,7 +2005,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
synchronize_irq(hw->pdev->irq);
- if (!(hw->flags & SKY2_HW_RAMBUFFER))
+ if (sky2_read8(hw, B2_E_0) == 0)
sky2_set_tx_stfwd(hw, port);
ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2526,7 +2536,7 @@ static void sky2_watchdog(unsigned long arg)
++active;
/* For chips with Rx FIFO, check if stuck */
- if ((hw->flags & SKY2_HW_RAMBUFFER) &&
+ if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
sky2_rx_hung(dev)) {
pr_info(PFX "%s: receiver hang detected\n",
dev->name);
@@ -2684,8 +2694,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
hw->flags = SKY2_HW_GIGABIT
- | SKY2_HW_NEWER_PHY
- | SKY2_HW_RAMBUFFER;
+ | SKY2_HW_NEWER_PHY;
+ if (hw->chip_rev < 3)
+ hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
+
break;
case CHIP_ID_YUKON_EC_U:
@@ -2711,11 +2723,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RAMBUFFER;
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
break;
case CHIP_ID_YUKON_FE:
- hw->flags = SKY2_HW_RAMBUFFER;
break;
case CHIP_ID_YUKON_FE_P:
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 69cd98400fe..8bc5c54e3ef 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2063,7 +2063,7 @@ struct sky2_hw {
#define SKY2_HW_FIBRE_PHY 0x00000002
#define SKY2_HW_GIGABIT 0x00000004
#define SKY2_HW_NEWER_PHY 0x00000008
-#define SKY2_HW_RAMBUFFER 0x00000010 /* chip has RAM FIFO */
+#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index c7c4574729b..de3155b2128 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
if (ret)
goto out;
}
+ envp[i] = NULL;
out:
free_page((unsigned long)prop_buf);
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 8d7ab74170d..a593f900eff 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
&cur_len, "W1_SLAVE_ID=%024LX",
(unsigned long long)sl->reg_num.id);
+ envp[cur_index] = NULL;
if (err)
return err;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 73402c5eeb8..38eb0b7a1f3 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -894,7 +894,7 @@ magic_found:
goto again;
}
-
+	sbi->s_flags = flags; /* functions below this point rely on s_flags */
ufs_print_super_stuff(sb, usb1, usb2, usb3);
/*
@@ -1025,8 +1025,6 @@ magic_found:
UFS_MOUNT_UFSTYPE_44BSD)
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
-
- sbi->s_flags = flags;
inode = iget(sb, UFS_ROOTINO);
if (!inode || is_bad_inode(inode))
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 16f8e175167..36d8f6aa11a 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -350,9 +350,10 @@ _xfs_filestream_update_ag(
/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
void
xfs_fstrm_free_func(
- xfs_ino_t ino,
- fstrm_item_t *item)
+ unsigned long ino,
+ void *data)
{
+ fstrm_item_t *item = (fstrm_item_t *)data;
xfs_inode_t *ip = item->ip;
int ref;
@@ -438,7 +439,7 @@ xfs_filestream_mount(
grp_count = 10;
err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count,
- (xfs_mru_cache_free_func_t)xfs_fstrm_free_func);
+ xfs_fstrm_free_func);
return err;
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index dacb19739cc..7174991f4be 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1920,9 +1920,9 @@ xlog_recover_do_reg_buffer(
stale_buf = 1;
break;
}
- if (be16_to_cpu(dip->di_core.di_mode))
+ if (dip->di_core.di_mode)
mode_count++;
- if (be16_to_cpu(dip->di_core.di_gen))
+ if (dip->di_core.di_gen)
gen_count++;
}
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index ec3ffdadb4d..99934a999e6 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -320,6 +320,8 @@ int acpi_processor_power_init(struct acpi_processor *pr,
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr,
struct acpi_device *device);
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
+int acpi_processor_resume(struct acpi_device * device);
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 97102ebc54b..2cb52cf8bd4 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -24,7 +24,30 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+
+struct irqaction;
+
+extern unsigned long irq_hwmask[];
+extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+ unsigned long hwmask);
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+ if (irq_hwmask[irq] & ST0_IM)
+ set_c0_status(irq_hwmask[irq] & ST0_IM);
+}
+
+#else
+
+static inline void smtc_im_ack_irq(unsigned int irq)
+{
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
+
/*
* Clear interrupt mask handling "backstop" if irq_hwmask
* entry so indicates. This implies that the ack() or end()
@@ -38,6 +61,7 @@ do { \
~(irq_hwmask[irq] & 0x0000ff00)); \
} while (0)
#else
+
#define __DO_IRQ_SMTC_HOOK(irq) do { } while (0)
#endif
@@ -60,14 +84,6 @@ do { \
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
-#ifdef CONFIG_MIPS_MT_SMTC
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask);
-#endif /* CONFIG_MIPS_MT_SMTC */
-
extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aab881c86a1..0962e057766 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -382,23 +382,8 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
- int cpu = smp_processor_id();
-
- /*
- * If the CPU is marked for broadcast, enforce oneshot
- * broadcast mode. The jinxed VAIO does not resume otherwise.
- * No idea why it ends up in a lower C State during resume
- * without notifying the clock events layer.
- */
- if (cpu_isset(cpu, tick_broadcast_mask))
- cpu_set(cpu, tick_broadcast_oneshot_mask);
-
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
-
- if(!cpus_empty(tick_broadcast_oneshot_mask))
- tick_broadcast_set_event(ktime_get(), 1);
-
- return cpu_isset(cpu, tick_broadcast_oneshot_mask);
+ return 0;
}
/*
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 50a94eee4d9..495863a500c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -284,7 +284,7 @@ config LOCKDEP
select KALLSYMS_ALL
config LOCK_STAT
- bool "Lock usage statisitics"
+ bool "Lock usage statistics"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f057430db0d..9b5656d8bcc 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void)
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
-static int snd_mem_proc_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
- int len = 0;
long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
mutex_lock(&list_mutex);
- len += snprintf(page + len, count - len,
- "pages : %li bytes (%li pages per %likB)\n",
- pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
+ seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
+ pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
- len += snprintf(page + len, count - len,
- "buffer %d : ID %08x : type %s\n",
- devno, mem->id, types[mem->buffer.dev.type]);
- len += snprintf(page + len, count - len,
- " addr = 0x%lx, size = %d bytes\n",
- (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
+ seq_printf(seq, "buffer %d : ID %08x : type %s\n",
+ devno, mem->id, types[mem->buffer.dev.type]);
+ seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
+ (unsigned long)mem->buffer.addr,
+ (int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
- return len;
+ return 0;
+}
+
+static int snd_mem_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, snd_mem_proc_read, NULL);
}
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
-static int snd_mem_proc_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos)
{
char buf[128];
char *token, *p;
- if (count > ARRAY_SIZE(buf) - 1)
- count = ARRAY_SIZE(buf) - 1;
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
- buf[ARRAY_SIZE(buf) - 1] = '\0';
+ buf[count] = '\0';
p = buf;
token = gettoken(&p);
if (! token || *token == '#')
- return (int)count;
+ return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
(buffers = simple_strtol(token, NULL, 0)) <= 0 ||
buffers > 4) {
printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
- return (int)count;
+ return count;
}
vendor &= 0xffff;
device &= 0xffff;
@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
if (pci_set_dma_mask(pci, mask) < 0 ||
pci_set_consistent_dma_mask(pci, mask) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
- return (int)count;
+ return count;
}
}
for (i = 0; i < buffers; i++) {
@@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
pci_dev_put(pci);
- return (int)count;
+ return count;
}
snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
}
@@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
free_all_reserved_pages();
else
printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
- return (int)count;
+ return count;
}
#endif /* CONFIG_PCI */
+
+static const struct file_operations snd_mem_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = snd_mem_proc_open,
+ .read = seq_read,
+#ifdef CONFIG_PCI
+ .write = snd_mem_proc_write,
+#endif
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#endif /* CONFIG_PROC_FS */
/*
@@ -609,12 +623,8 @@ static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
- if (snd_mem_proc) {
- snd_mem_proc->read_proc = snd_mem_proc_read;
-#ifdef CONFIG_PCI
- snd_mem_proc->write_proc = snd_mem_proc_write;
-#endif
- }
+ if (snd_mem_proc)
+ snd_mem_proc->proc_fops = &snd_mem_proc_fops;
#endif
return 0;
}