-rw-r--r--  Documentation/feature-removal-schedule.txt | 9
-rw-r--r--  Documentation/watchdog/watchdog-api.txt | 3
-rw-r--r--  arch/arm/mach-pxa/mainstone.c | 5
-rw-r--r--  arch/arm/mach-s3c2410/sleep.S | 6
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 4
-rw-r--r--  arch/ia64/kernel/iosapic.c | 4
-rw-r--r--  arch/ia64/kernel/irq.c | 1
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c | 16
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 6
-rw-r--r--  drivers/char/watchdog/sc1200wdt.c | 2
-rw-r--r--  drivers/ieee1394/ohci1394.c | 2
-rw-r--r--  drivers/ieee1394/sbp2.c | 209
-rw-r--r--  drivers/ieee1394/sbp2.h | 18
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 35
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 10
-rw-r--r--  drivers/mmc/au1xmmc.c | 6
-rw-r--r--  drivers/mmc/imxmmc.c | 24
-rw-r--r--  drivers/mmc/mmc.c | 1
-rw-r--r--  drivers/mmc/mmc_block.c | 1
-rw-r--r--  drivers/mmc/pxamci.c | 4
-rw-r--r--  drivers/mmc/wbsd.c | 8
-rw-r--r--  drivers/net/forcedeth.c | 312
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 13
-rw-r--r--  drivers/net/ixp2000/pm3386.c | 30
-rw-r--r--  drivers/net/ixp2000/pm3386.h | 1
-rw-r--r--  drivers/net/skge.c | 3
-rw-r--r--  drivers/net/sky2.c | 49
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/via-rhine.c | 34
-rw-r--r--  fs/Makefile | 2
-rw-r--r--  fs/configfs/dir.c | 137
-rw-r--r--  fs/ocfs2/aops.c | 46
-rw-r--r--  fs/ocfs2/aops.h | 4
-rw-r--r--  fs/ocfs2/extent_map.c | 6
-rw-r--r--  fs/ocfs2/file.c | 86
-rw-r--r--  fs/ocfs2/journal.c | 8
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  fs/ocfs2/vote.c | 6
-rw-r--r--  include/asm-arm/spinlock.h | 6
-rw-r--r--  include/linux/mmc/mmc.h | 1
-rw-r--r--  include/net/sctp/command.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 6
-rw-r--r--  net/802/tr.c | 1
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_gre.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 2
-rw-r--r--  net/ipx/af_ipx.c | 4
-rw-r--r--  net/ipx/ipx_route.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 4
-rw-r--r--  net/sched/sch_generic.c | 6
-rw-r--r--  net/sctp/input.c | 144
-rw-r--r--  net/sctp/sm_sideeffect.c | 16
-rw-r--r--  net/sctp/sm_statefuns.c | 81
-rw-r--r--  net/sctp/socket.c | 29
62 files changed, 812 insertions(+), 640 deletions(-)
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 421bcfff6ad..43ab119963d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com>
---------------------------
+What: sbp2: module parameter "force_inquiry_hack"
+When: July 2006
+Why: Superceded by parameter "workarounds". Both parameters are meant to be
+ used ad-hoc and for single devices only, i.e. not in modprobe.conf,
+ therefore the impact of this feature replacement should be low.
+Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+---------------------------
+
What: Video4Linux API 1 ioctls and video_decoder.h from Video devices.
When: July 2006
Why: V4L1 AP1 was replaced by V4L2 API. during migration from 2.4 to 2.6
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index c5beb548cfc..21ed5117366 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -36,6 +36,9 @@ timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
like this:
+#include <stdlib.h>
+#include <fcntl.h>
+
int main(int argc, const char *argv[]) {
int fd=open("/dev/watchdog",O_WRONLY);
if (fd==-1) {
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 98356f81000..02e188d98e7 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -95,7 +95,10 @@ static void __init mainstone_init_irq(void)
for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
set_irq_chip(irq, &mainstone_irq_chip);
set_irq_handler(irq, do_level_IRQ);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
+ else
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_flags(MAINSTONE_IRQ(8), 0);
set_irq_flags(MAINSTONE_IRQ(12), 0);
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
index 832fb86a03b..73de2eaca22 100644
--- a/arch/arm/mach-s3c2410/sleep.S
+++ b/arch/arm/mach-s3c2410/sleep.S
@@ -59,8 +59,7 @@ ENTRY(s3c2410_cpu_suspend)
mrc p15, 0, r5, c13, c0, 0 @ PID
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 0 @ translation table base address
- mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register
- mrc p15, 0, r9, c1, c0, 0 @ control register
+ mrc p15, 0, r8, c1, c0, 0 @ control register
stmia r0, { r4 - r13 }
@@ -165,7 +164,6 @@ ENTRY(s3c2410_cpu_resume)
mcr p15, 0, r5, c13, c0, 0 @ PID
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
mcr p15, 0, r7, c2, c0, 0 @ translation table base
- mcr p15, 0, r8, c1, c1, 0 @ auxilliary control
#ifdef CONFIG_DEBUG_RESUME
mov r3, #'R'
@@ -173,7 +171,7 @@ ENTRY(s3c2410_cpu_resume)
#endif
ldr r2, =resume_with_mmu
- mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc
+ mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc
nop @ second-to-last before mmu
mov pc, r2 @ go back to virtual address
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index f6a8853cd1b..9ea35398e10 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
-CONFIG_NODES_SHIFT=8
+CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7956eb9058f..d58c1c5c903 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_descp(irq);
- move_irq(irq);
+ move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5ce908ef9c9..9c72ea3f643 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
- set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index a13395e2c37..fa2ba9ebe42 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -33,11 +33,6 @@
* 82801E (C-ICH) : document number 273599-001, 273645-002,
* 82801EB (ICH5) : document number 252516-001, 252517-003,
* 82801ER (ICH5R) : document number 252516-001, 252517-003,
- * 82801FB (ICH6) : document number 301473-002, 301474-007,
- * 82801FR (ICH6R) : document number 301473-002, 301474-007,
- * 82801FBM (ICH6-M) : document number 301473-002, 301474-007,
- * 82801FW (ICH6W) : document number 301473-001, 301474-007,
- * 82801FRW (ICH6RW) : document number 301473-001, 301474-007
*
* 20000710 Nils Faerber
* Initial Version 0.01
@@ -66,6 +61,10 @@
* 20050807 Wim Van Sebroeck <wim@iguana.be>
* 0.08 Make sure that the watchdog is only "armed" when started.
* (Kernel Bug 4251)
+ * 20060416 Wim Van Sebroeck <wim@iguana.be>
+ * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and
+ * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these
+ * chipsets)
*/
/*
@@ -90,7 +89,7 @@
#include "i8xx_tco.h"
/* Module and version information */
-#define TCO_VERSION "0.08"
+#define TCO_VERSION "0.09"
#define TCO_MODULE_NAME "i8xx TCO timer"
#define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION
#define PFX TCO_MODULE_NAME ": "
@@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
};
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 9dc54736e4e..1ea04e9b2b0 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
if (tmr_atboot && started == 0) {
printk(KERN_INFO PFX "Starting Watchdog Timer\n");
s3c2410wdt_start();
+ } else if (!tmr_atboot) {
+ /* if we're not enabling the watchdog, then ensure it is
+ * disabled if it has been left running from the bootloader
+ * or other source */
+
+ s3c2410wdt_stop();
}
return 0;
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 515ce757204..20b88f9b7be 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void)
{
int ret;
- printk(banner);
+ printk("%s\n", banner);
spin_lock_init(&sc1200wdt_lock);
sema_init(&open_sem, 1);
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 19222878aae..11f13778f13 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
* register content.
* To actually enable physical responses is the job of our interrupt
* handler which programs the physical request filter. */
- reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+ reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
DBGMSG("physUpperBoundOffset=%08x",
reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f4206604db0..8a23fb54c69 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -42,6 +42,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
+#include <linux/stringify.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
*/
static int max_sectors = SBP2_MAX_SECTORS;
module_param(max_sectors, int, 0444);
-MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)");
+MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
+ __stringify(SBP2_MAX_SECTORS) ")");
/*
* Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
/*
- * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
- * if your sbp2 device is not properly handling the SCSI inquiry command.
- * This hack makes the inquiry look more like a typical MS Windows inquiry
- * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
+ * If any of the following workarounds is required for your device to work,
+ * please submit the kernel messages logged by sbp2 to the linux1394-devel
+ * mailing list.
*
- * If force_inquiry_hack=1 is required for your device to work,
- * please submit the logged sbp2_firmware_revision value of this device to
- * the linux1394-devel mailing list.
+ * - 128kB max transfer
+ * Limit transfer size. Necessary for some old bridges.
+ *
+ * - 36 byte inquiry
+ * When scsi_mod probes the device, let the inquiry command look like that
+ * from MS Windows.
+ *
+ * - skip mode page 8
+ * Suppress sending of mode_sense for mode page 8 if the device pretends to
+ * support the SCSI Primary Block commands instead of Reduced Block Commands.
+ *
+ * - fix capacity
+ * Tell sd_mod to correct the last sector number reported by read_capacity.
+ * Avoids access beyond actual disk limits on devices with an off-by-one bug.
+ * Don't use this with devices which don't have this bug.
+ *
+ * - override internal blacklist
+ * Instead of adding to the built-in blacklist, use only the workarounds
+ * specified in the module load parameter.
+ * Useful if a blacklist entry interfered with a non-broken device.
*/
+static int sbp2_default_workarounds;
+module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
+MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
+ ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
+ ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
+ ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
+ ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
+ ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
+ ", or a combination)");
+
+/* legacy parameter */
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0644);
-MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
+MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
/*
* Export information about protocols/devices supported by this driver.
@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
};
/*
- * List of device firmwares that require the inquiry hack.
- * Yields a few false positives but did not break other devices so far.
+ * List of devices with known bugs.
+ *
+ * The firmware_revision field, masked with 0xffff00, is the best indicator
+ * for the type of bridge chip of a device. It yields a few false positives
+ * but this did not break correctly behaving devices so far.
*/
-static u32 sbp2_broken_inquiry_list[] = {
- 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
- /* DViCO Momobay CX-1 */
- 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
- /* QPS Fire DVDBurner */
+static const struct {
+ u32 firmware_revision;
+ u32 model_id;
+ unsigned workarounds;
+} sbp2_workarounds_table[] = {
+ /* TSB42AA9 */ {
+ .firmware_revision = 0x002800,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
+ SBP2_WORKAROUND_MODE_SENSE_8,
+ },
+ /* Initio bridges, actually only needed for some older ones */ {
+ .firmware_revision = 0x000200,
+ .workarounds = SBP2_WORKAROUND_INQUIRY_36,
+ },
+ /* Symbios bridge */ {
+ .firmware_revision = 0xa0b800,
+ .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
+ },
+ /*
+ * Note about the following Apple iPod blacklist entries:
+ *
+ * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
+ * matching logic treats 0 as a wildcard, we cannot match this ID
+ * without rewriting the matching routine. Fortunately these iPods
+ * do not feature the read_capacity bug according to one report.
+ * Read_capacity behaviour as well as model_id could change due to
+ * Apple-supplied firmware updates though.
+ */
+ /* iPod 4th generation */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000021,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod mini */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x000023,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ },
+ /* iPod Photo */ {
+ .firmware_revision = 0x0a2700,
+ .model_id = 0x00007e,
+ .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
+ }
};
/**************************************
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
/* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per
- * target in order to support multi-unit devices. */
+ * target in order to support multi-unit devices.
+ * The FIFO is located out of the local host controller's physical range
+ * but, if possible, within the posted write area. Status writes will
+ * then be performed as unified transactions. This slightly reduces
+ * bandwidth usage, and some Prolific based devices seem to require it.
+ */
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
- ~0ULL, ~0ULL);
+ 0x010000000000ULL, CSR1212_ALL_SPACE_END);
if (!scsi_id->status_fifo_addr) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 command_set_spec_id, command_set, unit_characteristics,
- firmware_revision, workarounds;
+ firmware_revision;
+ unsigned workarounds;
int i;
SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
case SBP2_FIRMWARE_REVISION_KEY:
/* Firmware revision */
firmware_revision = kv->value.immediate;
- if (force_inquiry_hack)
- SBP2_INFO("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
- else
- SBP2_DEBUG("sbp2_firmware_revision = %x",
- (unsigned int)firmware_revision);
+ SBP2_DEBUG("sbp2_firmware_revision = %x",
+ (unsigned int)firmware_revision);
break;
default:
@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
}
}
- /* This is the start of our broken device checking. We try to hack
- * around oddities and known defects. */
- workarounds = 0x0;
+ workarounds = sbp2_default_workarounds;
+ if (force_inquiry_hack) {
+ SBP2_WARN("force_inquiry_hack is deprecated. "
+ "Use parameter 'workarounds' instead.");
+ workarounds |= SBP2_WORKAROUND_INQUIRY_36;
+ }
- /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a
- * bridge with 128KB max transfer size limitation. For sanity, we
- * only voice this when the current max_sectors setting
- * exceeds the 128k limit. By default, that is not the case.
- *
- * It would be really nice if we could detect this before the scsi
- * host gets initialized. That way we can down-force the
- * max_sectors to account for it. That is not currently
- * possible. */
- if ((firmware_revision & 0xffff00) ==
- SBP2_128KB_BROKEN_FIRMWARE &&
- (max_sectors * 512) > (128*1024)) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
- max_sectors);
- workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
- }
-
- /* Check for a blacklisted set of devices that require us to force
- * a 36 byte host inquiry. This can be overriden as a module param
- * (to force all hosts). */
- for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
- if ((firmware_revision & 0xffff00) ==
- sbp2_broken_inquiry_list[i]) {
- SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
- NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
- workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
- break; /* No need to continue. */
+ if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
+ for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
+ if (sbp2_workarounds_table[i].firmware_revision &&
+ sbp2_workarounds_table[i].firmware_revision !=
+ (firmware_revision & 0xffff00))
+ continue;
+ if (sbp2_workarounds_table[i].model_id &&
+ sbp2_workarounds_table[i].model_id != ud->model_id)
+ continue;
+ workarounds |= sbp2_workarounds_table[i].workarounds;
+ break;
}
- }
+
+ if (workarounds)
+ SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
+ "(firmware_revision 0x%06x, vendor_id 0x%06x,"
+ " model_id 0x%06x)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ workarounds, firmware_revision,
+ ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
+ ud->model_id);
+
+ /* We would need one SCSI host template for each target to adjust
+ * max_sectors on the fly, therefore warn only. */
+ if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
+ "max transfer size. WARNING: Current max_sectors "
+ "setting is larger than 128KB (%d sectors)",
+ NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
+ max_sectors);
/* If this is a logical unit directory entry, process the parent
* to get the values. */
@@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
scsi_id->sdev = sdev;
- if (force_inquiry_hack ||
- scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
+ if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
- sdev->skip_ms_page_8 = 1;
- }
return 0;
}
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
+ struct scsi_id_instance_data *scsi_id =
+ (struct scsi_id_instance_data *)sdev->host->hostdata[0];
+
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
+
+ if (sdev->type == TYPE_DISK &&
+ scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
+ sdev->skip_ms_page_8 = 1;
+ if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
+ sdev->fix_capacity = 1;
return 0;
}
@@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
scsi_driver_template.cmd_per_lun = 1;
}
- /* Set max sectors (module load option). Default is 255 sectors. */
+ if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
+ (max_sectors * 512) > (128 * 1024))
+ max_sectors = 128 * 1024 / 512;
scsi_driver_template.max_sectors = max_sectors;
/* Register our high level driver with 1394 stack */
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a9ea3..f4ccc9d0fba 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -227,11 +227,6 @@ struct sbp2_status_block {
#define SBP2_SW_VERSION_ENTRY 0x00010483
/*
- * Other misc defines
- */
-#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
-
-/*
* SCSI specific stuff
*/
@@ -239,6 +234,13 @@ struct sbp2_status_block {
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
+/* Flags for detected oddities and brokeness */
+#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
+#define SBP2_WORKAROUND_INQUIRY_36 0x2
+#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
+#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
+#define SBP2_WORKAROUND_OVERRIDE 0x100
+
/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
CMD_DMA_NONE,
@@ -268,10 +270,6 @@ struct sbp2_command_info {
};
-/* A list of flags for detected oddities and brokeness. */
-#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
-#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
-
struct sbp2scsi_host_info;
/*
@@ -345,7 +343,7 @@ struct scsi_id_instance_data {
struct Scsi_Host *scsi_host;
/* Device specific workarounds/brokeness */
- u32 workarounds;
+ unsigned workarounds;
};
/* Sbp2 host data structure (one per IEEE1394 host) */
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c31566..efe147dbeb4 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
*/
work = kmalloc(sizeof *work, GFP_KERNEL);
- if (!work)
+ if (!work) {
+ mmput(mm);
return;
+ }
INIT_WORK(&work->work, ib_umem_account, work);
work->mm = mm;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5dfa48..798e13e14fa 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -182,7 +182,7 @@ struct mthca_cmd_context {
u8 status;
};
-static int fw_cmd_doorbell = 1;
+static int fw_cmd_doorbell = 0;
module_param(fw_cmd_doorbell, int, 0644);
MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
"(and supported by FW)");
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6f8d5..07c13be07a4 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
ind = qp->rq.next_ind;
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
- nreq = 0;
-
- doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
- doorbell[1] = cpu_to_be32(qp->qpn << 8);
-
- wmb();
-
- mthca_write64(doorbell,
- dev->kar + MTHCA_RECEIVE_DOORBELL,
- MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-
- qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
- size0 = 0;
- }
-
+ for (nreq = 0; wr; wr = wr->next) {
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
mthca_err(dev, "RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", qp->qpn,
@@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
++ind;
if (unlikely(ind >= qp->rq.max))
ind -= qp->rq.max;
+
+ ++nreq;
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+ doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+ size0 = 0;
+ }
}
out:
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce4348e1..9cbdffa08dc 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
/* XXX should send SRP_I_LOGOUT request */
init_completion(&target->done);
- ib_send_cm_dreq(target->cm_id, NULL, 0);
+ if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
+ printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
+ return;
+ }
wait_for_completion(&target->done);
}
@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr)
spin_lock_irq(target->scsi_host->host_lock);
if (target->state != SRP_TARGET_DEAD) {
spin_unlock_irq(target->scsi_host->host_lock);
- scsi_host_put(target->scsi_host);
return;
}
target->state = SRP_TARGET_REMOVED;
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr)
ib_destroy_cm_id(target->cm_id);
srp_free_target_ib(target);
scsi_host_put(target->scsi_host);
- /* And another put to really free the target port... */
- scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
@@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
list_for_each_entry_safe(req, tmp, &target->req_queue, list)
if (req->scmnd->device == scmnd->device) {
req->scmnd->result = DID_RESET << 16;
- scmnd->scsi_done(scmnd);
+ req->scmnd->scsi_done(req->scmnd);
srp_remove_req(target, req);
}
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 914d62b2406..5dc4bee7abe 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
}
else
data->bytes_xfered =
- (data->blocks * (1 << data->blksz_bits)) -
+ (data->blocks * data->blksz) -
host->pio.len;
}
@@ -575,7 +575,7 @@ static int
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
{
- int datalen = data->blocks * (1 << data->blksz_bits);
+ int datalen = data->blocks * data->blksz;
if (dma != 0)
host->flags |= HOST_F_DMA;
@@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)
if (host->dma.len == 0)
return MMC_ERR_TIMEOUT;
- au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host));
+ au_writel(data->blksz - 1, HOST_BLKSIZE(host));
if (host->flags & HOST_F_DMA) {
int i;
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 79358e223f5..a4eb1d0e7a7 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -218,8 +218,10 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host,
if(!loops)
return 0;
- dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
- loops, where, *pstat, stat_mask);
+ /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
+ if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000))
+ dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
+ loops, where, *pstat, stat_mask);
return loops;
}
@@ -333,6 +335,9 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
+ /* Ensure, that clock are stopped else command programming and start fails */
+ imxmci_stop_clock(host);
+
if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMD_DAT_CONT_BUSY;
@@ -553,7 +558,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
int trans_done = 0;
unsigned int stat = *pstat;
- if(host->actual_bus_width == MMC_BUS_WIDTH_4)
+ if(host->actual_bus_width != MMC_BUS_WIDTH_4)
burst_len = 16;
else
burst_len = 64;
@@ -591,8 +596,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
stat = MMC_STATUS;
/* Flush extra bytes from FIFO */
- while(flush_len >= 2){
- flush_len -= 2;
+ while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){
i = MMC_BUFFER_ACCESS;
stat = MMC_STATUS;
stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */
@@ -746,10 +750,6 @@ static void imxmci_tasklet_fnc(unsigned long data)
data_dir_mask = STATUS_DATA_TRANS_DONE;
}
- imxmci_busy_wait_for_status(host, &stat,
- data_dir_mask,
- 50, "imxmci_tasklet_fnc data");
-
if(stat & data_dir_mask) {
clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
imxmci_data_done(host, stat);
@@ -865,7 +865,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
imxmci_stop_clock(host);
MMC_CLK_RATE = (prescaler<<3) | clk;
- imxmci_start_clock(host);
+ /*
+ * Under my understanding, clock should not be started there, because it would
+ * initiate SDHC sequencer and send last or random command into card
+ */
+ /*imxmci_start_clock(host);*/
dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE);
} else {
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 1ca2c8b9c9b..6201f3086a0 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -951,6 +951,7 @@ static void mmc_read_scrs(struct mmc_host *host)
data.timeout_ns = card->csd.tacc_ns * 10;
data.timeout_clks = card->csd.tacc_clks * 10;
data.blksz_bits = 3;
+ data.blksz = 1 << 3;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 06bd1f4cb9b..e39cc05c64c 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -175,6 +175,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.data.timeout_ns = card->csd.tacc_ns * 10;
brq.data.timeout_clks = card->csd.tacc_clks * 10;
brq.data.blksz_bits = md->block_bits;
+ brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index f97b472085c..b49368fd96b 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
nob = 0xffff;
writel(nob, host->base + MMC_NOB);
- writel(1 << data->blksz_bits, host->base + MMC_BLKLEN);
+ writel(data->blksz, host->base + MMC_BLKLEN);
clks = (unsigned long long)data->timeout_ns * CLOCKRATE;
do_div(clks, 1000000000UL);
@@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
* data blocks as being in error.
*/
if (data->error == MMC_ERR_NONE)
- data->bytes_xfered = data->blocks << data->blksz_bits;
+ data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 39b3d97f891..8167332d401 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
unsigned long dmaflags;
DBGF("blksz %04x blks %04x flags %08x\n",
- 1 << data->blksz_bits, data->blocks, data->flags);
+ data->blksz, data->blocks, data->flags);
DBGF("tsac %d ms nsac %d clk\n",
data->timeout_ns / 1000000, data->timeout_clks);
/*
* Calculate size.
*/
- host->size = data->blocks << data->blksz_bits;
+ host->size = data->blocks * data->blksz;
/*
* Check timeout values for overflow.
@@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
* Two bytes are needed for each data line.
*/
if (host->bus_width == MMC_BUS_WIDTH_1) {
- blksize = (1 << data->blksz_bits) + 2;
+ blksize = data->blksz + 2;
wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
} else if (host->bus_width == MMC_BUS_WIDTH_4) {
- blksize = (1 << data->blksz_bits) + 2 * 4;
+ blksize = data->blksz + 2 * 4;
wbsd_write_index(host, WBSD_IDX_PBSMSB,
((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f7235c9bc42..7e078b4cca7 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,7 +106,6 @@
* 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.54"
+#define FORCEDETH_VERSION "0.53"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
}
}
-static int using_multi_irqs(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
- return 0;
- else
- return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
- u8 __iomem *base = get_hwbase(dev);
-
- writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
- struct fe_priv *np = get_nvpriv(dev);
- u8 __iomem *base = get_hwbase(dev);
-
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- writel(mask, base + NvRegIrqMask);
- } else {
- if (np->msi_flags & NV_MSI_ENABLED)
- writel(0, base + NvRegMSIIrqMask);
- writel(0, base + NvRegIrqMask);
- }
-}
-
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1086,25 +1019,24 @@ static void nv_do_rx_refill(unsigned long data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(dev->irq);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ disable_irq(dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
}
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(dev->irq);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ enable_irq(dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
- nv_disable_irq(dev);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ disable_irq(dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
spin_lock_bh(&dev->xmit_lock);
spin_lock(&np->lock);
/* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_start_tx(dev);
spin_unlock(&np->lock);
spin_unlock_bh(&dev->xmit_lock);
- nv_enable_irq(dev);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ enable_irq(dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
}
return 0;
}
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask))
break;
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
nv_tx_done(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
break;
}
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
break;
}
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
break;
if (events & NVREG_IRQ_LINK) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
nv_link_irq(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
nv_linkchange(dev);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock_irq(&np->lock);
+ spin_lock(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
- spin_unlock_irq(&np->lock);
+ spin_unlock(&np->lock);
break;
}
@@ -2303,11 +2251,10 @@ static void nv_do_nic_poll(unsigned long data)
* nv_nic_irq because that may decide to do otherwise
*/
- if (!using_multi_irqs(dev)) {
- if (np->msi_flags & NV_MSI_X_ENABLED)
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- disable_irq(dev->irq);
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ disable_irq(dev->irq);
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2330,12 +2277,11 @@ static void nv_do_nic_poll(unsigned long data)
writel(mask, base + NvRegIrqMask);
pci_push(base);
- if (!using_multi_irqs(dev)) {
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
- if (np->msi_flags & NV_MSI_X_ENABLED)
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
- else
- enable_irq(dev->irq);
+ enable_irq(dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
-static int nv_request_irq(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
- u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
- int i;
-
- if (np->msi_flags & NV_MSI_X_CAPABLE) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- np->msi_x_entry[i].entry = i;
- }
- if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
- np->msi_flags |= NV_MSI_X_ENABLED;
- if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
- /* Request irq for rx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
- /* Request irq for tx handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_rx;
- }
- /* Request irq for link and timer handling */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_free_tx;
- }
- /* map interrupts to their respective vector */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
- set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
- } else {
- /* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- goto out_err;
- }
-
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIXMap0);
- writel(0, base + NvRegMSIXMap1);
- }
- }
- }
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
- if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
- np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
- printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- goto out_err;
- }
-
- /* map interrupts to vector 0 */
- writel(0, base + NvRegMSIMap0);
- writel(0, base + NvRegMSIMap1);
- /* enable msi vector 0 */
- writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
- }
- }
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
- goto out_err;
- }
-
- return 0;
-out_free_tx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
- free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
- return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
- struct fe_priv *np = get_nvpriv(dev);
- int i;
-
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- free_irq(np->msi_x_entry[i].vector, dev);
- }
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- } else {
- free_irq(np->pci_dev->irq, dev);
- if (np->msi_flags & NV_MSI_ENABLED) {
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- }
- }
-}
-
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
- nv_disable_hw_interrupts(dev, np->irqmask);
+ writel(0, base + NvRegIrqMask);
pci_push(base);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
- if (nv_request_irq(dev)) {
- goto out_drain;
- }
-
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@ static int nv_open(struct net_device *dev)
}
/* ask for interrupts */
- nv_enable_hw_interrupts(dev, np->irqmask);
+ writel(np->irqmask, base + NvRegIrqMask);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base;
+ int i;
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@ static int nv_close(struct net_device *dev)
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
- nv_disable_hw_interrupts(dev, np->irqmask);
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ writel(np->irqmask, base + NvRegIrqMask);
+ } else {
+ if (np->msi_flags & NV_MSI_ENABLED)
+ writel(0, base + NvRegMSIIrqMask);
+ writel(0, base + NvRegIrqMask);
+ }
pci_push(base);
dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock);
- nv_free_irq(dev);
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
drain_ring(dev);
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_HIGH_DMA) {
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
- np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
pci_name(pci_dev));
} else {
- dev->features |= NETIF_F_HIGHDMA;
- printk(KERN_INFO "forcedeth: using HIGHDMA\n");
- }
- if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
- pci_name(pci_dev));
+ if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+ pci_name(pci_dev));
+ goto out_relreg;
+ } else {
+ dev->features |= NETIF_F_HIGHDMA;
+ printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+ }
}
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba5..b67f586d739 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
int status;
dev = nds[i];
+ if (dev == NULL)
+ continue;
status = pm3386_is_link_up(i);
if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
static int __init enp2611_init_module(void)
{
+ int ports;
int i;
if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
caleb_reset();
pm3386_reset();
- for (i = 0; i < 3; i++) {
+ ports = pm3386_port_count();
+ for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
ixp2400_msf_init(&enp2611_msf_parameters);
- if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
- for (i = 0; i < 3; i++)
- free_netdev(nds[i]);
+ if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+ for (i = 0; i < ports; i++)
+ if (nds[i])
+ free_netdev(nds[i]);
return -EINVAL;
}
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab756405..5224651c9aa 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
pm3386_reg_write(port >> 1, reg, value);
}
+int pm3386_secondary_present(void)
+{
+ return pm3386_reg_read(1, 0) == 0x3386;
+}
void pm3386_reset(void)
{
u8 mac[3][6];
+ int secondary;
+
+ secondary = pm3386_secondary_present();
/* Save programmed MAC addresses. */
pm3386_get_mac(0, mac[0]);
pm3386_get_mac(1, mac[1]);
- pm3386_get_mac(2, mac[2]);
+ if (secondary)
+ pm3386_get_mac(2, mac[2]);
/* Assert analog and digital reset. */
pm3386_reg_write(0, 0x002, 0x0060);
- pm3386_reg_write(1, 0x002, 0x0060);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0060);
mdelay(1);
/* Deassert analog reset. */
pm3386_reg_write(0, 0x002, 0x0062);
- pm3386_reg_write(1, 0x002, 0x0062);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0062);
mdelay(10);
/* Deassert digital reset. */
pm3386_reg_write(0, 0x002, 0x0063);
- pm3386_reg_write(1, 0x002, 0x0063);
+ if (secondary)
+ pm3386_reg_write(1, 0x002, 0x0063);
mdelay(10);
/* Restore programmed MAC addresses. */
pm3386_set_mac(0, mac[0]);
pm3386_set_mac(1, mac[1]);
- pm3386_set_mac(2, mac[2]);
+ if (secondary)
+ pm3386_set_mac(2, mac[2]);
/* Disable carrier on all ports. */
pm3386_set_carrier(0, 0);
pm3386_set_carrier(1, 0);
- pm3386_set_carrier(2, 0);
+ if (secondary)
+ pm3386_set_carrier(2, 0);
}
static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
return ((x << 8) | (x >> 8)) & 0xffff;
}
+int pm3386_port_count(void)
+{
+ return 2 + pm3386_secondary_present();
+}
+
void pm3386_init_port(int port)
{
int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac..cc4183dca91 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
#define __PM3386_H
void pm3386_reset(void);
+int pm3386_port_count(void);
void pm3386_init_port(int port);
void pm3386_get_mac(int port, u8 *mac);
void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0cc10..63a6b3dfc09 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267fab21..60779ebf2ff 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define PFX DRV_NAME " "
/*
@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
}
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
if (sky2->autoneg == AUTONEG_ENABLE &&
- (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+ !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,7 +1022,25 @@ static int sky2_up(struct net_device *dev)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u32 ramsize, rxspace, imask;
- int err = -ENOMEM;
+ int cap, err = -ENOMEM;
+ struct net_device *otherdev = hw->dev[sky2->port^1];
+
+ /*
+ * On dual port PCI-X card, there is an problem where status
+ * can be received out of order due to split transactions
+ */
+ if (otherdev && netif_running(otherdev) &&
+ (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+ struct sky2_port *osky2 = netdev_priv(otherdev);
+ u16 cmd;
+
+ cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+ cmd &= ~PCI_X_CMD_MAX_SPLIT;
+ sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+ sky2->rx_csum = 0;
+ osky2->rx_csum = 0;
+ }
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1899,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
}
}
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+ return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
@@ -2171,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (status & Y2_IS_CHK_TXA2)
sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
- if (status & Y2_IS_STAT_BMU)
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
work_done = sky2_status_intr(hw, work_limit);
*budget -= work_done;
dev0->quota -= work_done;
- if (work_done >= work_limit)
+ if (status & Y2_IS_STAT_BMU)
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+ if (sky2_more_work(hw))
return 1;
netif_rx_complete(dev0);
- status = sky2_read32(hw, B0_Y2_SP_LISR);
+ sky2_read32(hw, B0_Y2_SP_LISR);
return 0;
}
@@ -3067,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->duplex = -1;
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
-
- /* Receive checksum disabled for Yukon XL
- * because of observed problems with incorrect
- * values when multiple packets are received in one interrupt
- */
- sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+ sky2->rx_csum = 1;
spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
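The sky2_up() hunk above works around out-of-order status reports on dual-port PCI-X boards by limiting the PCI-X maximum split transactions to one and disabling receive checksum offload on both ports while both are up. A minimal sketch of the same config-space clamp, written against the generic PCI helpers instead of sky2's sky2_pci_* wrappers (the function name is illustrative only):

#include <linux/pci.h>

/* Restrict a PCI-X function to a single outstanding split transaction. */
static void pcix_limit_split_transactions(struct pci_dev *pdev)
{
	int cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	u16 cmd;

	if (!cap)
		return;				/* not a PCI-X function */

	pci_read_config_word(pdev, cap + PCI_X_CMD, &cmd);
	cmd &= ~PCI_X_CMD_MAX_SPLIT;		/* field value 0 = one split */
	pci_write_config_word(pdev, cap + PCI_X_CMD, cmd);
}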
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8012994c9b9..8a0bc5525f0 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@ enum csr_regs {
enum {
Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */
Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */
+ Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */
Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */
Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a6dc53b4250..fdc21037f6d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -491,8 +491,6 @@ struct rhine_private {
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
- struct work_struct tx_timeout_task;
- struct work_struct check_media_task;
void __iomem *base;
};
@@ -500,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
@@ -856,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- INIT_WORK(&rp->tx_timeout_task,
- (void (*)(void *))rhine_tx_timeout_task, dev);
-
- INIT_WORK(&rp->check_media_task,
- (void (*)(void *))rhine_check_media_task, dev);
-
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
@@ -1108,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
netif_carrier_ok(mii->dev));
}
-static void rhine_check_media_task(struct net_device *dev)
-{
- rhine_check_media(dev, 0);
-}
-
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -1166,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
if (quirks & rqRhineI) {
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
- /* Do not call from ISR! */
- msleep(1);
+ /* Can be called from ISR. Evil. */
+ mdelay(1);
/* 0x80 must be set immediately before turning it off */
iowrite8(0x80, ioaddr + MIICmd);
@@ -1257,16 +1242,6 @@ static int rhine_open(struct net_device *dev)
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
-
- /*
- * Move bulk of work outside of interrupt context
- */
- schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
- struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1677,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
- schedule_work(&rp->check_media_task);
+ rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1927,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
spin_unlock_irq(&rp->lock);
free_irq(rp->pdev->irq, dev);
-
- flush_scheduled_work();
-
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
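By removing the tx_timeout and media-check work structs, the via-rhine hunks above let rhine_check_media() and rhine_disable_linkmon() run directly from the interrupt path, which is why the msleep(1) becomes mdelay(1): sleeping is not allowed in atomic context, so a short busy-wait is used instead. A minimal sketch of that rule (the helper is invented for illustration):

#include <linux/delay.h>

static void short_pause(int in_atomic)
{
	if (in_atomic)
		mdelay(1);	/* busy-waits; safe in IRQ/atomic context */
	else
		msleep(1);	/* may schedule; process context only */
}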
diff --git a/fs/Makefile b/fs/Makefile
index 83bf478e786..078d3d1191a 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY) += dnotify.o
obj-$(CONFIG_PROC_FS) += proc/
obj-y += partitions/
obj-$(CONFIG_SYSFS) += sysfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-y += devpts/
obj-$(CONFIG_PROFILING) += dcookies.o
@@ -100,5 +101,4 @@ obj-$(CONFIG_BEFS_FS) += befs/
obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
-obj-$(CONFIG_CONFIGFS_FS) += configfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 5638c8f9362..5f952187fc5 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group)
int i;
if (group->default_groups) {
- /* FYI, we're faking mkdir here
+ /*
+ * FYI, we're faking mkdir here
* I'm not sure we need this semaphore, as we're called
* from our parent's mkdir. That holds our parent's
* i_mutex, so afaik lookup cannot continue through our
* parent to find us, let alone mess with our tree.
* That said, taking our i_mutex is closer to mkdir
- * emulation, and shouldn't hurt. */
+ * emulation, and shouldn't hurt.
+ */
mutex_lock(&dentry->d_inode->i_mutex);
for (i = 0; group->default_groups[i]; i++) {
@@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item)
item->ci_group = NULL;
item->ci_parent = NULL;
+
+ /* Drop the reference for ci_entry */
config_item_put(item);
+ /* Drop the reference for ci_parent */
config_group_put(group);
}
}
static void link_obj(struct config_item *parent_item, struct config_item *item)
{
- /* Parent seems redundant with group, but it makes certain
- * traversals much nicer. */
+ /*
+ * Parent seems redundant with group, but it makes certain
+ * traversals much nicer.
+ */
item->ci_parent = parent_item;
+
+ /*
+ * We hold a reference on the parent for the child's ci_parent
+ * link.
+ */
item->ci_group = config_group_get(to_config_group(parent_item));
list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
+ /*
+ * We hold a reference on the child for ci_entry on the parent's
+ * cg_children
+ */
config_item_get(item);
}
@@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item,
type = parent_item->ci_type;
BUG_ON(!type);
+ /*
+ * If ->drop_item() exists, it is responsible for the
+ * config_item_put().
+ */
if (type->ct_group_ops && type->ct_group_ops->drop_item)
type->ct_group_ops->drop_item(to_config_group(parent_item),
item);
@@ -694,23 +714,28 @@ static void client_drop_item(struct config_item *parent_item,
static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
- int ret;
+ int ret, module_got = 0;
struct config_group *group;
struct config_item *item;
struct config_item *parent_item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
struct config_item_type *type;
- struct module *owner;
+ struct module *owner = NULL;
char *name;
- if (dentry->d_parent == configfs_sb->s_root)
- return -EPERM;
+ if (dentry->d_parent == configfs_sb->s_root) {
+ ret = -EPERM;
+ goto out;
+ }
sd = dentry->d_parent->d_fsdata;
- if (!(sd->s_type & CONFIGFS_USET_DIR))
- return -EPERM;
+ if (!(sd->s_type & CONFIGFS_USET_DIR)) {
+ ret = -EPERM;
+ goto out;
+ }
+ /* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
subsys = to_config_group(parent_item)->cg_subsys;
@@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (!type || !type->ct_group_ops ||
(!type->ct_group_ops->make_group &&
!type->ct_group_ops->make_item)) {
- config_item_put(parent_item);
- return -EPERM; /* What lack-of-mkdir returns */
+ ret = -EPERM; /* Lack-of-mkdir returns -EPERM */
+ goto out_put;
}
name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
if (!name) {
- config_item_put(parent_item);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_put;
}
+
snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
down(&subsys->su_sem);
@@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
kfree(name);
if (!item) {
- config_item_put(parent_item);
- return -ENOMEM;
+ /*
+ * If item == NULL, then link_obj() was never called.
+ * There are no extra references to clean up.
+ */
+ ret = -ENOMEM;
+ goto out_put;
}
- ret = -EINVAL;
+ /*
+ * link_obj() has been called (via link_group() for groups).
+ * From here on out, errors must clean that up.
+ */
+
type = item->ci_type;
- if (type) {
- owner = type->ct_owner;
- if (try_module_get(owner)) {
- if (group) {
- ret = configfs_attach_group(parent_item,
- item,
- dentry);
- } else {
- ret = configfs_attach_item(parent_item,
- item,
- dentry);
- }
+ if (!type) {
+ ret = -EINVAL;
+ goto out_unlink;
+ }
- if (ret) {
- down(&subsys->su_sem);
- if (group)
- unlink_group(group);
- else
- unlink_obj(item);
- client_drop_item(parent_item, item);
- up(&subsys->su_sem);
+ owner = type->ct_owner;
+ if (!try_module_get(owner)) {
+ ret = -EINVAL;
+ goto out_unlink;
+ }
- config_item_put(parent_item);
- module_put(owner);
- }
- }
+ /*
+ * I hate doing it this way, but if there is
+ * an error, module_put() probably should
+ * happen after any cleanup.
+ */
+ module_got = 1;
+
+ if (group)
+ ret = configfs_attach_group(parent_item, item, dentry);
+ else
+ ret = configfs_attach_item(parent_item, item, dentry);
+
+out_unlink:
+ if (ret) {
+ /* Tear down everything we built up */
+ down(&subsys->su_sem);
+ if (group)
+ unlink_group(group);
+ else
+ unlink_obj(item);
+ client_drop_item(parent_item, item);
+ up(&subsys->su_sem);
+
+ if (module_got)
+ module_put(owner);
}
+out_put:
+ /*
+ * link_obj()/link_group() took a reference from child->parent,
+ * so the parent is safely pinned. We can drop our working
+ * reference.
+ */
+ config_item_put(parent_item);
+
+out:
return ret;
}
@@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
if (sd->s_type & CONFIGFS_USET_DEFAULT)
return -EPERM;
+ /* Get a working ref until we have the child */
parent_item = configfs_get_config_item(dentry->d_parent);
subsys = to_config_group(parent_item)->cg_subsys;
BUG_ON(!subsys);
@@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
return ret;
}
+ /* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
/* Drop reference from above, item already holds one. */
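The configfs_mkdir() rework above replaces the nested-if error handling with the kernel's usual goto-unwind layout: each resource taken on the way in has a matching label on the way out, and an error jumps to the label that releases exactly what has been acquired so far (here out_unlink, out_put and out). A stripped-down sketch of the shape, with invented names:

static int do_setup(void)
{
	int ret;

	ret = take_first();		/* hypothetical acquisition */
	if (ret)
		goto out;

	ret = take_second();		/* hypothetical acquisition */
	if (ret)
		goto out_first;

	return 0;

out_first:
	release_first();		/* undo take_first() */
out:
	return ret;
}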
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d858d0b25b..47152bf9a7f 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
return ret;
}
+/* This can also be called from ocfs2_write_zero_page() which has done
+ * its own cluster locking. */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to)
+{
+ int ret;
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ return ret;
+}
+
/*
* ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
* from loopback. It must be able to perform its own locking around
* ocfs2_get_block().
*/
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+static int ocfs2_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
int ret;
@@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page,
goto out;
}
- down_read(&OCFS2_I(inode)->ip_alloc_sem);
-
- ret = block_prepare_write(page, from, to, ocfs2_get_block);
-
- up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ret = ocfs2_prepare_write_nolock(inode, page, from, to);
ocfs2_meta_unlock(inode, 0);
out:
@@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw,
int ret;
mlog_entry_void();
+
+ /*
+ * We get PR data locks even for O_DIRECT. This allows
+ * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
+ * extending and buffered zeroing writes race. If they did
+ * race then the buffered zeroing could be written back after
+ * the O_DIRECT I/O. It's one thing to tell people not to mix
+ * buffered and O_DIRECT writes, but expecting them to
+ * understand that file extension is also an implicit buffered
+ * write is too much. By getting the PR we force writeback of
+ * the buffered zeroing before proceeding.
+ */
+ ret = ocfs2_data_lock(inode, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ ocfs2_data_unlock(inode, 0);
+
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
+out:
mlog_exit(ret);
return ret;
}
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index d40456d509a..e88c3f0b8fa 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -22,8 +22,8 @@
#ifndef OCFS2_AOPS_H
#define OCFS2_AOPS_H
-int ocfs2_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to);
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+ unsigned from, unsigned to);
struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 4601fc256f1..1a5c69071df 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,
ret = -ENOMEM;
ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.new_ent) {
mlog_errno(ret);
return ret;
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode,
if (ctxt.need_left && !ctxt.left_ent) {
ctxt.left_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.left_ent)
break;
}
if (ctxt.need_right && !ctxt.right_ent) {
ctxt.right_ent =
kmem_cache_alloc(ocfs2_em_ent_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!ctxt.right_ent)
break;
}
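This hunk, like the ocfs2 journal.c, uptodate.c and vote.c hunks below, switches allocations from GFP_KERNEL to GFP_NOFS. These allocations happen while cluster or journal locks are held, and a GFP_KERNEL allocation may enter direct reclaim and call back into the filesystem, which can deadlock against those locks; GFP_NOFS forbids that recursion. A minimal sketch of the pattern, with a hypothetical cache name:

	/* in a filesystem path that already holds fs locks */
	void *ent = kmem_cache_alloc(my_fs_entry_cachep, GFP_NOFS);
	if (!ent)
		return -ENOMEM;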
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 581eb451a41..a9559c87453 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -613,7 +613,8 @@ leave:
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
- * worry about recursive locking in ->commit_write(). */
+ * worry about recursive locking in ->prepare_write() and
+ * ->commit_write(). */
static int ocfs2_write_zero_page(struct inode *inode,
u64 size)
{
@@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
goto out;
}
- ret = ocfs2_prepare_write(NULL, page, offset, offset);
+ ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
if (ret < 0) {
mlog_errno(ret);
goto out_unlock;
@@ -695,13 +696,26 @@ out:
return ret;
}
+/*
+ * A tail_to_skip value > 0 indicates that we're being called from
+ * ocfs2_file_aio_write(). This has the following implications:
+ *
+ * - we don't want to update i_size
+ * - di_bh will be NULL, which is fine because it's only used in the
+ * case where we want to update i_size.
+ * - ocfs2_zero_extend() will then only be filling the hole created
+ * between i_size and the start of the write.
+ */
static int ocfs2_extend_file(struct inode *inode,
struct buffer_head *di_bh,
- u64 new_i_size)
+ u64 new_i_size,
+ size_t tail_to_skip)
{
int ret = 0;
u32 clusters_to_add;
+ BUG_ON(!tail_to_skip && !di_bh);
+
/* setattr sometimes calls us like this. */
if (new_i_size == 0)
goto out;
@@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode,
OCFS2_I(inode)->ip_clusters;
if (clusters_to_add) {
- ret = ocfs2_extend_allocation(inode, clusters_to_add);
+ /*
+		 * Protect the pages that ocfs2_zero_extend() is going to
+		 * be pulling into the page cache. We do this before the
+ * metadata extend so that we don't get into the situation
+ * where we've extended the metadata but can't get the data
+ * lock to zero.
+ */
+ ret = ocfs2_data_lock(inode, 1);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
- ret = ocfs2_zero_extend(inode, new_i_size);
+ ret = ocfs2_extend_allocation(inode, clusters_to_add);
if (ret < 0) {
mlog_errno(ret);
- goto out;
+ goto out_unlock;
}
- }
- /* No allocation required, we just use this helper to
- * do a trivial update of i_size. */
- ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
+ ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+ }
+
+ if (!tail_to_skip) {
+ /* We're being called from ocfs2_setattr() which wants
+ * us to update i_size */
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
}
+out_unlock:
+ if (clusters_to_add) /* this is the only case in which we lock */
+ ocfs2_data_unlock(inode, 1);
+
out:
return ret;
}
@@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (i_size_read(inode) > attr->ia_size)
status = ocfs2_truncate_file(inode, bh, attr->ia_size);
else
- status = ocfs2_extend_file(inode, bh, attr->ia_size);
+ status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
@@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
if (!clusters)
break;
- ret = ocfs2_extend_allocation(inode, clusters);
+ ret = ocfs2_extend_file(inode, NULL, newsize, count);
if (ret < 0) {
if (ret != -ENOSPC)
mlog_errno(ret);
goto out;
}
-
- /* Fill any holes which would've been created by this
- * write. If we're O_APPEND, this will wind up
- * (correctly) being a noop. */
- ret = ocfs2_zero_extend(inode, (u64) newsize - count);
- if (ret < 0) {
- mlog_errno(ret);
- goto out;
- }
break;
}
@@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
ocfs2_iocb_set_rw_locked(iocb);
}
+ /*
+ * We're fine letting folks race truncates and extending
+ * writes with read across the cluster, just like they can
+ * locally. Hence no rw_lock during read.
+ *
+ * Take and drop the meta data lock to update inode fields
+	 * like i_size. This gives the checks down in
+	 * generic_file_aio_read() a chance of actually working.
+ */
+ ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+ ocfs2_meta_unlock(inode, 0);
+
ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
if (ret == -EINVAL)
mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 6a610ae5358..eebc3cfa6be 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
{
struct ocfs2_journal_handle *retval = NULL;
- retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+ retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
if (!retval) {
mlog(ML_ERROR, "Failed to allocate memory for journal "
"handle!\n");
@@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode)
if (p_blocks > CONCURRENT_JOURNAL_FILL)
p_blocks = CONCURRENT_JOURNAL_FILL;
+ /* We are reading journal data which should not
+ * be put in the uptodate cache */
status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
p_blkno, p_blocks, bhs, 0,
- inode);
+ NULL);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
{
struct ocfs2_la_recovery_item *item;
- item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+ item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS);
if (!item) {
/* Though we wish to avoid it, we are in fact safe in
* skipping local alloc cleanup as fsck.ocfs2 is more
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index 04a684dfdd9..b8a00a79332 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
(unsigned long long)oi->ip_blkno,
(unsigned long long)block, expand_tree);
- new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+ new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
if (!new) {
mlog_errno(-ENOMEM);
return;
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
* has no way of tracking that. */
for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
- GFP_KERNEL);
+ GFP_NOFS);
if (!tree[i]) {
mlog_errno(-ENOMEM);
goto out_free;
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index 53049a20419..ee42765a855 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response
{
struct ocfs2_net_wait_ctxt *w;
- w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+ w = kcalloc(1, sizeof(*w), GFP_NOFS);
if (!w) {
mlog_errno(-ENOMEM);
goto bail;
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
BUG_ON(!ocfs2_is_valid_vote_request(type));
- request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+ request = kcalloc(1, sizeof(*request), GFP_NOFS);
if (!request) {
mlog_errno(-ENOMEM);
} else {
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
struct ocfs2_super *osb = data;
struct ocfs2_vote_work *work;
- work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+ work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS);
if (!work) {
status = -ENOMEM;
mlog_errno(status);
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 43ad4e55878..406ca97a8ab 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -142,6 +142,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
: "cc");
}
+/* write_can_lock - would write_trylock() succeed? */
+#define __raw_write_can_lock(x) ((x)->lock == 0x80000000)
+
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -198,4 +201,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+/* read_can_lock - would read_trylock() succeed? */
+#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+
#endif /* __ASM_SPINLOCK_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bdc556d8849..03a14a30c46 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -69,6 +69,7 @@ struct mmc_data {
unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */
unsigned int timeout_clks; /* data timeout (in clocks) */
unsigned int blksz_bits; /* data block size */
+ unsigned int blksz; /* data block size */
unsigned int blocks; /* number of blocks */
unsigned int error; /* data error */
unsigned int flags;
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 34a1a09e5ae..807d6f1ef4b 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -99,6 +99,7 @@ typedef enum {
SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */
SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */
SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */
+ SCTP_CMD_SET_SK_ERR, /* Set sk_err */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index e673b2c984e..aa6033ca7cd 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -461,12 +461,12 @@ static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu)
* there is room for a param header too.
*/
#define sctp_walk_params(pos, chunk, member)\
-_sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member)
+_sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\
- pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\
+ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += WORD_ROUND(ntohs(pos.p->length)))
@@ -477,7 +477,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\
- (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\
+ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
diff --git a/net/802/tr.c b/net/802/tr.c
index afd8385c0c9..e9dc803f2fe 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -643,6 +643,5 @@ static int __init rif_init(void)
module_init(rif_init);
-EXPORT_SYMBOL(tr_source_route);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index d159c92cca8..466ed3440b7 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr,
if (info->bitmask & EBT_LOG_NFLOG)
nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
- info->prefix);
+ "%s", info->prefix);
else
ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li,
info->prefix);
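The one-line ebt_log change above (mirrored for ipt_LOG and ip6t_LOG below) fixes a format-string bug: info->prefix comes from userspace, so it must be passed as the argument of a "%s" format rather than as the format string itself, where any '%' conversions it contains would be interpreted. The same rule, illustrated with printk() and a hypothetical buffer:

	printk("%s\n", untrusted_prefix);	/* safe */
	printk(untrusted_prefix);		/* unsafe: '%' sequences in the buffer are interpreted */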
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c2d92f99a2b..d0d19192026 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len)
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index 6c4899d8046..96ceabaec40 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple,
const union ip_conntrack_manip_proto *min,
const union ip_conntrack_manip_proto *max)
{
- u_int32_t key;
+ __be16 key;
if (maniptype == IP_NAT_MANIP_SRC)
key = tuple->src.u.gre.key;
else
key = tuple->dst.u.gre.key;
- return ntohl(key) >= ntohl(min->gre.key)
- && ntohl(key) <= ntohl(max->gre.key);
+ return ntohs(key) >= ntohs(min->gre.key)
+ && ntohs(key) <= ntohs(max->gre.key);
}
/* generate unique tuple ... */
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
min = 1;
range_size = 0xffff;
} else {
- min = ntohl(range->min.gre.key);
- range_size = ntohl(range->max.gre.key) - min + 1;
+ min = ntohs(range->min.gre.key);
+ range_size = ntohs(range->max.gre.key) - min + 1;
}
DEBUGP("min = %u, range_size = %u\n", min, range_size);
for (i = 0; i < range_size; i++, key++) {
- *keyptr = htonl(min + key % range_size);
+ *keyptr = htons(min + key % range_size);
if (!ip_nat_used_tuple(tuple, conntrack))
return 1;
}
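The GRE NAT hunks above fix both the width and the byte order of the key handling: the keys used for PPTP call-ID NAT are 16-bit big-endian values, so they are carried as __be16 and converted with ntohs()/htons() only at the point where host-order arithmetic is needed. A small illustrative helper following the same convention:

#include <linux/types.h>
#include <asm/byteorder.h>

static int be16_in_range(__be16 key, __be16 min, __be16 max)
{
	/* convert to host order only for the comparison */
	return ntohs(key) >= ntohs(min) && ntohs(key) <= ntohs(max);
}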
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 39fd4c2a238..b98f7b08b08 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb,
if (loginfo->logflags & IPT_LOG_NFLOG)
nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 14384328570..b847ee409ef 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -821,6 +821,7 @@ checkentry(const char *tablename,
/* Create our proc 'status' entry. */
curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent);
if (!curr_table->status_proc) {
+ vfree(hold);
printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n");
/* Destroy the created table */
spin_lock_bh(&recent_lock);
@@ -845,7 +846,6 @@ checkentry(const char *tablename,
spin_unlock_bh(&recent_lock);
vfree(curr_table->time_info);
vfree(curr_table->hash_table);
- vfree(hold);
vfree(curr_table->table);
vfree(curr_table);
return 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f0cca4c4fa..4a538bc1683 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1662,6 +1662,8 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
+ if (IsReno(tp))
+ tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
/* clear xmit_retrans hint */
if (tp->retransmit_skb_hint &&
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0a673038344..2e72f89a701 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len)
write_lock_bh(&t->lock);
private = t->private;
- if (private->number != paddc->num_counters) {
+ if (private->number != tmp.num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index a96c0de14b0..73c6300109d 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb,
if (loginfo->logflags & IP6T_LOG_NFLOG)
nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
- loginfo->prefix);
+ "%s", loginfo->prefix);
else
ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
loginfo->prefix);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 94dbdb8b458..4f6b84c8f4a 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb,
memset(eui64, 0, sizeof(eui64));
- if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) {
+ if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
if (skb->nh.ipv6h->version == 0x6) {
memcpy(eui64, eth_hdr(skb)->h_source, 3);
memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2dbf134d526..811d998725b 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -944,9 +944,9 @@ out:
return rc;
}
-static int ipx_map_frame_type(unsigned char type)
+static __be16 ipx_map_frame_type(unsigned char type)
{
- int rc = 0;
+ __be16 rc = 0;
switch (type) {
case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break;
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 67774448efd..a394c6fe19a 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -119,7 +119,7 @@ out:
return rc;
}
-static int ipxrtr_delete(long net)
+static int ipxrtr_delete(__u32 net)
{
struct ipx_route *r, *tmp;
int rc;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index c60273cad77..61cdda4e5d3 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -321,7 +321,7 @@ static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
spin_lock_bh(&inst->lock);
- inst->flags = ntohs(flags);
+ inst->flags = flags;
spin_unlock_bh(&inst->lock);
return 0;
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
if (nfula[NFULA_CFG_FLAGS-1]) {
u_int16_t flags =
*(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]);
- nfulnl_set_flags(inst, ntohl(flags));
+ nfulnl_set_flags(inst, ntohs(flags));
}
out_put:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 31eb83717c2..138ea92ed26 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -193,8 +193,10 @@ static void dev_watchdog(unsigned long arg)
netif_running(dev) &&
netif_carrier_ok(dev)) {
if (netif_queue_stopped(dev) &&
- (jiffies - dev->trans_start) > dev->watchdog_timeo) {
- printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
+ time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+
+ printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+ dev->name);
dev->tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
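The dev_watchdog() hunk above converts the open-coded jiffies subtraction into time_after(), the idiomatic wrap-safe way of comparing jiffies timestamps, and splits the long printk line. A minimal illustration (the helper name is made up):

#include <linux/jiffies.h>

static int tx_timed_out(unsigned long trans_start, unsigned long timeo)
{
	/* true once 'timeo' ticks have passed since trans_start,
	 * correct across a wrap of the jiffies counter */
	return time_after(jiffies, trans_start + timeo);
}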
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d117ebc75cf..1662f9cc869 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association(
const union sctp_addr *peer,
struct sctp_transport **pt);
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb)
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
- sock_put(sk);
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb)
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
- sock_hold(sk);
rcvr = &ep->base;
}
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb)
*/
sctp_bh_lock_sock(sk);
- /* It is possible that the association could have moved to a different
- * socket if it is peeled off. If so, update the sk.
- */
- if (sk != rcvr->sk) {
- sctp_bh_lock_sock(rcvr->sk);
- sctp_bh_unlock_sock(sk);
- sk = rcvr->sk;
- }
-
if (sock_owned_by_user(sk))
- sk_add_backlog(sk, skb);
+ sctp_add_backlog(sk, skb);
else
- sctp_backlog_rcv(sk, skb);
+ sctp_inq_push(&chunk->rcvr->inqueue, chunk);
- /* Release the sock and the sock ref we took in the lookup calls.
- * The asoc/ep ref will be released in sctp_backlog_rcv.
- */
sctp_bh_unlock_sock(sk);
- sock_put(sk);
+
+ /* Release the asoc/ep ref we took in the lookup calls. */
+ if (asoc)
+ sctp_association_put(asoc);
+ else
+ sctp_endpoint_put(ep);
return 0;
@@ -280,8 +273,7 @@ discard_it:
return 0;
discard_release:
- /* Release any structures we may be holding. */
- sock_put(sk);
+ /* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
@@ -290,56 +282,87 @@ discard_release:
goto discard_it;
}
-/* Handle second half of inbound skb processing. If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog). If not busy, we call this routine
- * directly from the bottom half.
+/* Process the backlog queue of the socket. Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
*/
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
- struct sctp_inq *inqueue = NULL;
+ struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
struct sctp_ep_common *rcvr = NULL;
+ int backloged = 0;
rcvr = chunk->rcvr;
- BUG_TRAP(rcvr->sk == sk);
-
- if (rcvr->dead) {
- sctp_chunk_free(chunk);
- } else {
- inqueue = &chunk->rcvr->inqueue;
- sctp_inq_push(inqueue, chunk);
- }
-
- /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
- if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
- sctp_association_put(sctp_assoc(rcvr));
- else
- sctp_endpoint_put(sctp_ep(rcvr));
-
+ /* If the rcvr is dead then the association or endpoint
+ * has been deleted and we can safely drop the chunk
+ * and refs that we are holding.
+ */
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ goto done;
+ }
+
+ if (unlikely(rcvr->sk != sk)) {
+ /* In this case, the association moved from one socket to
+ * another. We are currently sitting on the backlog of the
+ * old socket, so we need to move.
+ * However, since we are here in the process context we
+		 * need to make sure that the user doesn't own
+ * the new socket when we process the packet.
+ * If the new socket is user-owned, queue the chunk to the
+ * backlog of the new socket without dropping any refs.
+ * Otherwise, we can safely push the chunk on the inqueue.
+ */
+
+ sk = rcvr->sk;
+ sctp_bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ sk_add_backlog(sk, skb);
+ backloged = 1;
+ } else
+ sctp_inq_push(inqueue, chunk);
+
+ sctp_bh_unlock_sock(sk);
+
+		/* If the chunk was backlogged again, don't drop refs */
+ if (backloged)
+ return 0;
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
+
+done:
+	/* Release the ref we took in sctp_add_backlog(). */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_put(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_put(sctp_ep(rcvr));
+ else
+ BUG();
+
return 0;
}
-void sctp_backlog_migrate(struct sctp_association *assoc,
- struct sock *oldsk, struct sock *newsk)
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
- skb = oldsk->sk_backlog.head;
- oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
- while (skb != NULL) {
- struct sk_buff *next = skb->next;
-
- chunk = SCTP_INPUT_CB(skb)->chunk;
- skb->next = NULL;
- if (&assoc->base == chunk->rcvr)
- sk_add_backlog(newsk, skb);
- else
- sk_add_backlog(oldsk, skb);
- skb = next;
- }
+ /* Hold the assoc/ep while hanging on the backlog queue.
+	 * This way, we know the structures we need will not disappear from under us
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+
+ sk_add_backlog(sk, skb);
}
/* Handle icmp frag needed error. */
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
- struct sctp_association *asoc = NULL;
+ struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
*app = NULL; *tpp = NULL;
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
return sk;
out:
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
return NULL;
@@ -463,7 +485,6 @@ out:
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
sctp_bh_unlock_sock(sk);
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
}
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
struct sock *sk;
- struct sctp_association *asoc;
+ struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
struct inet_sock *inet;
char *saveip, *savesctp;
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
hit:
sctp_endpoint_hold(ep);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return ep;
}
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association(
hit:
*pt = transport;
sctp_association_hold(asoc);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return asoc;
}
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr,
struct sctp_transport *transport;
if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
- sock_put(asoc->base.sk);
sctp_association_put(asoc);
return 1;
}
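The sctp/input.c rework above stops holding the socket for packets parked on the backlog and instead holds the association or endpoint the chunk belongs to: sctp_add_backlog() takes that reference before calling sk_add_backlog(), and sctp_backlog_rcv() drops it once the chunk is finally pushed onto an inqueue (or keeps it when the chunk has to be re-backlogged onto the socket it migrated to). A stripped-down sketch of the hold-before-queue pattern, with hypothetical object helpers:

static void queue_with_ref(struct sock *sk, struct sk_buff *skb,
			   struct my_obj *obj)
{
	my_obj_hold(obj);		/* pin it while the skb sits on the backlog */
	sk_add_backlog(sk, skb);
}

static int process_from_backlog(struct sock *sk, struct sk_buff *skb,
				struct my_obj *obj)
{
	int ret = handle_chunk(obj, skb);	/* hypothetical processing */

	my_obj_put(obj);			/* ref taken in queue_with_ref() */
	return ret;
}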
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8d1dc24bab4..c5beb2ad7ef 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
- /* Set sk_err to ECONNRESET on a 1-1 style socket. */
- if (!sctp_style(asoc->base.sk, UDP))
- asoc->base.sk->sk_err = ECONNRESET;
-
/* SEND_FAILED sent later when cleaning up the association. */
asoc->outqueue.error = error;
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
return;
}
+/* Helper function to set sk_err on a 1-1 style socket. */
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (!sctp_style(sk, UDP))
+ sk->sk_err = error;
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
local_cork = 0;
asoc->peer.retran_path = t;
break;
+ case SCTP_CMD_SET_SK_ERR:
+ sctp_cmd_set_sk_err(asoc, cmd->obj.error);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8cdba51ec07..8bc279219a7 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
__u32 init_tag;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
- sctp_disposition_t ret;
+ __u16 error;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
- return SCTP_DISPOSITION_DELETE_TCB;
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM,
+ ECONNREFUSED, asoc,
+ chunk->transport);
}
/* Verify the INIT chunk before processing it. */
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
+ error = SCTP_ERROR_INV_PARAM;
} else {
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return SCTP_DISPOSITION_NOMEM;
+ error = SCTP_ERROR_NO_RESOURCE;
}
} else {
- ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg,
- commands);
- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
- SCTP_STATE(SCTP_STATE_CLOSED));
- sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB,
- SCTP_NULL());
- return ret;
+ sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+ error = SCTP_ERROR_INV_PARAM;
}
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED,
+ asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
@@ -886,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -1030,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+ /* Make sure that the length of the parameter is what we expect */
+ if (ntohs(hbinfo->param_hdr.length) !=
+ sizeof(sctp_sender_hb_info_t)) {
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
@@ -2126,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -2262,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -2306,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
- return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport);
+ return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc,
+ chunk->transport);
}
/*
@@ -2318,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep
void *arg,
sctp_cmd_seq_t *commands)
{
- return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc,
+ return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR,
+ ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
@@ -2343,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep,
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
- __u16 error,
+ __u16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
@@ -2353,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(error));
@@ -3336,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3362,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -3714,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
@@ -4034,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4175,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
* TCB. This is a departure from our typical NOMEM handling.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_USER_ABORT));
@@ -4543,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
struct sctp_transport *transport = arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4662,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d"
" max_init_attempts: %d\n",
attempts, asoc->max_init_attempts);
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4711,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -4742,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep,
SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -4817,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire(
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
@@ -4870,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep,
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -5309,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b6e4b89539b..174d4d35e95 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk,
inet_sk(sk)->dport = htons(asoc->peer.port);
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
+ sk->sk_err = 0;
timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
ep = sctp_sk(sk)->ep;
- /* Walk all associations on a socket, not on an endpoint. */
+ /* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
if (sctp_state(asoc, CLOSED)) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ continue;
+ }
+ }
- } else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->sk_lingertime)
- sctp_primitive_ABORT(asoc, NULL);
- else
- sctp_primitive_SHUTDOWN(asoc, NULL);
- } else
+ if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
+ sctp_primitive_ABORT(asoc, NULL);
+ else
sctp_primitive_SHUTDOWN(asoc, NULL);
}
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
+ BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
*/
newsp->type = type;
- spin_lock_bh(&oldsk->sk_lock.slock);
- /* Migrate the backlog from oldsk to newsk. */
- sctp_backlog_migrate(assoc, oldsk, newsk);
- /* Migrate the association to the new socket. */
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ */
+ sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
- spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
+ sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */