Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/braille/braille_console.c13
-rw-r--r--drivers/acpi/glue.c5
-rw-r--r--drivers/acpi/sleep/proc.c10
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c25
-rw-r--r--drivers/ata/ata_piix.c184
-rw-r--r--drivers/ata/libata-core.c252
-rw-r--r--drivers/ata/libata-eh.c375
-rw-r--r--drivers/ata/libata-scsi.c112
-rw-r--r--drivers/ata/libata.h5
-rw-r--r--drivers/ata/pata_bf54x.c34
-rw-r--r--drivers/ata/pata_sil680.c2
-rw-r--r--drivers/ata/sata_fsl.c26
-rw-r--r--drivers/ata/sata_inic162x.c8
-rw-r--r--drivers/ata/sata_mv.c28
-rw-r--r--drivers/ata/sata_nv.c62
-rw-r--r--drivers/ata/sata_promise.c16
-rw-r--r--drivers/ata/sata_qstor.c12
-rw-r--r--drivers/ata/sata_sil.c16
-rw-r--r--drivers/ata/sata_sil24.c12
-rw-r--r--drivers/ata/sata_sis.c28
-rw-r--r--drivers/ata/sata_svw.c10
-rw-r--r--drivers/ata/sata_uli.c24
-rw-r--r--drivers/ata/sata_via.c24
-rw-r--r--drivers/ata/sata_vsc.c10
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/class.c136
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/block/aoe/aoeblk.c6
-rw-r--r--drivers/block/aoe/aoecmd.c19
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/cciss.c8
-rw-r--r--drivers/block/cciss_scsi.c151
-rw-r--r--drivers/block/cciss_scsi.h4
-rw-r--r--drivers/block/cpqarray.c2
-rw-r--r--drivers/block/floppy.c31
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/block/ps3disk.c11
-rw-r--r--drivers/block/virtio_blk.c14
-rw-r--r--drivers/block/xen-blkfront.c76
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/btusb.c36
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/tty_io.c14
-rw-r--r--drivers/clocksource/acpi_pm.c21
-rw-r--r--drivers/cpufreq/cpufreq.c30
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c5
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c147
-rw-r--r--drivers/cpufreq/cpufreq_performance.c4
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c4
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c4
-rw-r--r--drivers/crypto/talitos.c6
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/hwmon/abituguru3.c3
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/atxp1.c18
-rw-r--r--drivers/hwmon/it87.c74
-rw-r--r--drivers/i2c/busses/i2c-powermac.c4
-rw-r--r--drivers/i2c/i2c-dev.c4
-rw-r--r--drivers/ide/Kconfig18
-rw-r--r--drivers/ide/ide-cd.c8
-rw-r--r--drivers/ide/ide-disk.c15
-rw-r--r--drivers/ide/ide-dma.c2
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide-tape.c10
-rw-r--r--drivers/ide/mips/Makefile1
-rw-r--r--drivers/ide/mips/swarm.c196
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/mad.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h14
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c3
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c225
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c211
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c7
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c9
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c51
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c59
-rw-r--r--drivers/infiniband/hw/nes/nes.c95
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c52
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c205
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h6
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c122
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c88
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c30
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c77
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c62
-rw-r--r--drivers/input/mouse/bcm5974.c13
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c4
-rw-r--r--drivers/leds/leds-fsg.c28
-rw-r--r--drivers/leds/leds-pca955x.c70
-rw-r--r--drivers/md/dm-crypt.c109
-rw-r--r--drivers/md/dm-exception-store.c29
-rw-r--r--drivers/md/dm-ioctl.c10
-rw-r--r--drivers/md/dm-mpath.c66
-rw-r--r--drivers/md/dm-mpath.h2
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-stripe.c4
-rw-r--r--drivers/md/dm-table.c97
-rw-r--r--drivers/md/dm.c52
-rw-r--r--drivers/md/dm.h10
-rw-r--r--drivers/md/linear.c10
-rw-r--r--drivers/md/md.c23
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c10
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c14
-rw-r--r--drivers/md/raid5.c75
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h1
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c1
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c16
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c16
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c11
-rw-r--r--drivers/media/dvb/frontends/s5h1420.h8
-rw-r--r--drivers/media/dvb/siano/sms-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c2
-rw-r--r--drivers/media/video/cafe_ccic.c1
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c5
-rw-r--r--drivers/media/video/cx18/cx18-cards.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c12
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c55
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c9
-rw-r--r--drivers/media/video/gspca/gspca.c3
-rw-r--r--drivers/media/video/gspca/pac7311.c1
-rw-r--r--drivers/media/video/gspca/sonixb.c4
-rw-r--r--drivers/media/video/gspca/sonixj.c19
-rw-r--r--drivers/media/video/gspca/spca561.c2
-rw-r--r--drivers/media/video/gspca/zc3xx.c4
-rw-r--r--drivers/media/video/ov511.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c2
-rw-r--r--drivers/media/video/s2255drv.c3
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c13
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/video/w9968cf.c2
-rw-r--r--drivers/media/video/wm8739.c4
-rw-r--r--drivers/media/video/zoran_card.c2
-rw-r--r--drivers/media/video/zoran_driver.c15
-rw-r--r--drivers/memstick/core/mspro_block.c4
-rw-r--r--drivers/mfd/Kconfig4
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/misc/eeepc-laptop.c16
-rw-r--r--drivers/mmc/card/block.c21
-rw-r--r--drivers/mmc/card/mmc_test.c4
-rw-r--r--drivers/mmc/host/atmel-mci.c18
-rw-r--r--drivers/mmc/host/tmio_mmc.h4
-rw-r--r--drivers/mtd/ftl.c24
-rw-r--r--drivers/mtd/mtd_blkdevs.c16
-rw-r--r--drivers/net/bnx2.h2
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000_hw.c23
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c9
-rw-r--r--drivers/net/e1000e/ich8lan.c78
-rw-r--r--drivers/net/e1000e/netdev.c72
-rw-r--r--drivers/net/e1000e/param.c30
-rw-r--r--drivers/net/forcedeth.c16
-rw-r--r--drivers/net/hp-plus.c8
-rw-r--r--drivers/net/mlx4/mr.c10
-rw-r--r--drivers/net/wireless/ath9k/core.c11
-rw-r--r--drivers/net/wireless/ath9k/core.h1
-rw-r--r--drivers/net/wireless/ath9k/main.c12
-rw-r--r--drivers/net/wireless/ath9k/xmit.c6
-rw-r--r--drivers/net/wireless/b43/rfkill.c18
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/pci/pci-sysfs.c19
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/search.c6
-rw-r--r--drivers/pcmcia/ds.c23
-rw-r--r--drivers/pnp/Makefile5
-rw-r--r--drivers/pnp/pnpacpi/core.c2
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/rtc/rtc-dev.c15
-rw-r--r--drivers/s390/block/dasd.c32
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c28
-rw-r--r--drivers/s390/block/dasd_eckd.c132
-rw-r--r--drivers/s390/block/dasd_eer.c6
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c520
-rw-r--r--drivers/s390/block/xpram.c37
-rw-r--r--drivers/s390/char/con3215.c53
-rw-r--r--drivers/s390/char/con3270.c27
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp_con.c24
-rw-r--r--drivers/s390/char/sclp_vt220.c26
-rw-r--r--drivers/s390/char/tape_3590.c132
-rw-r--r--drivers/s390/char/tape_block.c2
-rw-r--r--drivers/s390/char/tape_core.c21
-rw-r--r--drivers/s390/char/tape_proc.c2
-rw-r--r--drivers/s390/char/tape_std.c13
-rw-r--r--drivers/s390/char/vmlogrdr.c5
-rw-r--r--drivers/s390/char/vmur.c6
-rw-r--r--drivers/s390/cio/blacklist.c11
-rw-r--r--drivers/s390/cio/ccwgroup.c7
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/cio/chsc_sch.c2
-rw-r--r--drivers/s390/cio/cio.c57
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/s390/cio/device.c118
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c6
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/io_sch.h22
-rw-r--r--drivers/s390/cio/ioasm.h49
-rw-r--r--drivers/s390/cio/qdio.h11
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_main.c32
-rw-r--r--drivers/s390/cio/qdio_setup.c55
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/kvm/kvm_virtio.c15
-rw-r--r--drivers/s390/net/claw.c36
-rw-r--r--drivers/s390/net/claw.h2
-rw-r--r--drivers/s390/net/ctcm_main.c22
-rw-r--r--drivers/s390/net/ctcm_main.h2
-rw-r--r--drivers/s390/net/lcs.c34
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core.h10
-rw-r--r--drivers/s390/net/qeth_core_main.c10
-rw-r--r--drivers/s390/s390_rdev.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c149
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c45
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c75
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h1
-rw-r--r--drivers/s390/scsi/zfcp_def.h183
-rw-r--r--drivers/s390/scsi/zfcp_erp.c231
-rw-r--r--drivers/s390/scsi/zfcp_ext.h27
-rw-r--r--drivers/s390/scsi/zfcp_fc.c227
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c584
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h75
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c67
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c28
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c62
-rw-r--r--drivers/scsi/Kconfig8
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/gdth.c60
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/gdth_proc.c66
-rw-r--r--drivers/scsi/gdth_proc.h3
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ide-scsi.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/libiscsi.c19
-rw-r--r--drivers/scsi/libsas/sas_ata.c10
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c30
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c6
-rw-r--r--drivers/scsi/ncr53c8xx.c4
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h71
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c338
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/qlogicpti.c1
-rw-r--r--drivers/scsi/scsi.c105
-rw-r--r--drivers/scsi/scsi_error.c90
-rw-r--r--drivers/scsi/scsi_lib.c59
-rw-r--r--drivers/scsi/scsi_netlink.c523
-rw-r--r--drivers/scsi/scsi_priv.h7
-rw-r--r--drivers/scsi/scsi_proc.c8
-rw-r--r--drivers/scsi/scsi_scan.c20
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/scsi_tgt_lib.c8
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sd.c122
-rw-r--r--drivers/scsi/sg.c667
-rw-r--r--drivers/scsi/sr.c7
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/tmscsim.c4
-rw-r--r--drivers/serial/atmel_serial.c32
-rw-r--r--drivers/spi/orion_spi.c2
-rw-r--r--drivers/spi/pxa2xx_spi.c4
-rw-r--r--drivers/ssb/main.c1
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c39
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c4
-rw-r--r--drivers/usb/host/ehci-hcd.c26
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c2
-rw-r--r--drivers/usb/host/ehci-sched.c32
-rw-r--r--drivers/usb/musb/Kconfig1
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/omap2430.c4
-rw-r--r--drivers/usb/musb/omap2430.h4
-rw-r--r--drivers/usb/serial/cp2101.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.h5
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/sierra.c12
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/storage/Kconfig12
-rw-r--r--drivers/usb/storage/Makefile3
-rw-r--r--drivers/usb/storage/unusual_devs.h30
-rw-r--r--drivers/usb/storage/usb.c2
-rw-r--r--drivers/video/console/fbcon.c6
-rw-r--r--drivers/video/console/fbcon.h2
-rw-r--r--drivers/watchdog/geodewdt.c6
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c4
-rw-r--r--drivers/watchdog/rc32434_wdt.c6
-rw-r--r--drivers/watchdog/rdc321x_wdt.c6
-rw-r--r--drivers/watchdog/wdt285.c15
333 files changed, 6960 insertions, 4321 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index 0a5f6b2114c..d672cfe7ca5 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index,
console->flags |= CON_ENABLED;
console->index = index;
braille_co = console;
+ register_keyboard_notifier(&keyboard_notifier_block);
+ register_vt_notifier(&vt_notifier_block);
return 0;
}
@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console)
{
if (braille_co != console)
return -EINVAL;
+ unregister_keyboard_notifier(&keyboard_notifier_block);
+ unregister_vt_notifier(&vt_notifier_block);
braille_co = NULL;
return 0;
}
-
-static int __init braille_init(void)
-{
- register_keyboard_notifier(&keyboard_notifier_block);
- register_vt_notifier(&vt_notifier_block);
- return 0;
-}
-
-console_initcall(braille_init);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 084109507c9..8dd3336efd7 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
"firmware_node");
ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
"physical_node");
- if (acpi_dev->wakeup.flags.valid)
+ if (acpi_dev->wakeup.flags.valid) {
device_set_wakeup_capable(dev, true);
+ device_set_wakeup_enable(dev,
+ acpi_dev->wakeup.state.enabled);
+ }
}
return 0;
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 4ebbba2b6b1..bf5b04de02d 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
return 0;
}
+static void physical_device_enable_wakeup(struct acpi_device *adev)
+{
+ struct device *dev = acpi_get_physical_device(adev->handle);
+
+ if (dev && device_can_wakeup(dev))
+ device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+}
+
static ssize_t
acpi_system_write_wakeup_device(struct file *file,
const char __user * buffer,
@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file,
}
}
if (found_dev) {
+ physical_device_enable_wakeup(found_dev);
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev = container_of(node,
struct
@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file,
dev->pnp.bus_id, found_dev->pnp.bus_id);
dev->wakeup.state.enabled =
found_dev->wakeup.state.enabled;
+ physical_device_enable_wakeup(dev);
}
}
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 11c8c19f0fb..f17cd4b572f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -663,7 +663,7 @@ config HAVE_PATA_PLATFORM
config PATA_PLATFORM
tristate "Generic platform device PATA support"
- depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM
+ depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2e1a7cb2ed5..aeadd00411a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -267,8 +267,8 @@ struct ahci_port_priv {
* per PM slot */
};
-static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
static struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
+ &dev_attr_unload_heads,
NULL
};
@@ -820,10 +821,10 @@ static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
return 0;
}
-static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
- void __iomem *port_mmio = ahci_port_base(ap);
- int offset = ahci_scr_offset(ap, sc_reg);
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
*val = readl(port_mmio + offset);
@@ -832,10 +833,10 @@ static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
return -EINVAL;
}
-static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
- void __iomem *port_mmio = ahci_port_base(ap);
- int offset = ahci_scr_offset(ap, sc_reg);
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ int offset = ahci_scr_offset(link->ap, sc_reg);
if (offset) {
writel(val, port_mmio + offset);
@@ -973,7 +974,7 @@ static void ahci_disable_alpm(struct ata_port *ap)
writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
/* go ahead and clean out PhyRdy Change from Serror too */
- ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
/*
* Clear flag to indicate that we should ignore all PhyRdy
@@ -1937,8 +1938,8 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
/* AHCI needs SError cleared; otherwise, it might lock up */
- ahci_scr_read(ap, SCR_ERROR, &serror);
- ahci_scr_write(ap, SCR_ERROR, serror);
+ ahci_scr_read(&ap->link, SCR_ERROR, &serror);
+ ahci_scr_write(&ap->link, SCR_ERROR, serror);
host_ehi->serror |= serror;
/* some controllers set IRQ_IF_ERR on device errors, ignore it */
@@ -2027,7 +2028,7 @@ static void ahci_port_intr(struct ata_port *ap)
if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
(status & PORT_IRQ_PHYRDY)) {
status &= ~PORT_IRQ_PHYRDY;
- ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
}
if (unlikely(status & PORT_IRQ_ERROR)) {
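
The ahci hunks above are part of a tree-wide signature change: ->scr_read()/->scr_write() now take the struct ata_link being accessed rather than the owning struct ata_port, so drivers with per-link register sets (see the slave_link work in libata-core.c further down) can tell master and slave apart. As a rough sketch only — the demo_* names and the fixed register layout are invented for illustration, and it assumes the SCR block was mapped into ap->ioaddr.scr_addr at probe time — an LLD's accessors end up looking like this:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/libata.h>

/* hypothetical SCR register offsets within the port's SCR block */
static const int demo_scr_offset[] = {
	[SCR_STATUS]	= 0x00,
	[SCR_ERROR]	= 0x08,
	[SCR_CONTROL]	= 0x04,
};

static int demo_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg >= ARRAY_SIZE(demo_scr_offset))
		return -EINVAL;
	*val = readl(scr_base + demo_scr_offset[sc_reg]);
	return 0;
}

static int demo_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg >= ARRAY_SIZE(demo_scr_offset))
		return -EINVAL;
	writel(val, scr_base + demo_scr_offset[sc_reg]);
	return 0;
}

These would then be wired into the driver's struct ata_port_operations as .scr_read/.scr_write, the same way ahci does above.
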
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e6b4606e36b..e9e32ed6b1a 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -165,8 +165,10 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static int ich_pata_cable_detect(struct ata_port *ap);
static u8 piix_vmw_bmdma_status(struct ata_port *ap);
-static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val);
-static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val);
+static int piix_sidpr_scr_read(struct ata_link *link,
+ unsigned int reg, u32 *val);
+static int piix_sidpr_scr_write(struct ata_link *link,
+ unsigned int reg, u32 val);
#ifdef CONFIG_PM
static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -278,12 +280,15 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
+ { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (PCH) */
{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
-
{ } /* terminate list */
};
@@ -582,6 +587,7 @@ static const struct ich_laptop ich_laptop[] = {
{ 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
{ 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
{ 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
+ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
{ 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
{ 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
{ 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
@@ -885,23 +891,9 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* Serial ATA Index/Data Pair Superset Registers access
*
* Beginning from ICH8, there's a sane way to access SCRs using index
- * and data register pair located at BAR5. This creates an
- * interesting problem of mapping two SCRs to one port.
- *
- * Although they have separate SCRs, the master and slave aren't
- * independent enough to be treated as separate links - e.g. softreset
- * resets both. Also, there's no protocol defined for hard resetting
- * singled device sharing the virtual port (no defined way to acquire
- * device signature). This is worked around by merging the SCR values
- * into one sensible value and requesting follow-up SRST after
- * hardreset.
- *
- * SCR merging is perfomed in nibbles which is the unit contents in
- * SCRs are organized. If two values are equal, the value is used.
- * When they differ, merge table which lists precedence of possible
- * values is consulted and the first match or the last entry when
- * nothing matches is used. When there's no merge table for the
- * specific nibble, value from the first port is used.
+ * and data register pair located at BAR5 which means that we have
+ * separate SCRs for master and slave. This is handled using libata
+ * slave_link facility.
*/
static const int piix_sidx_map[] = {
[SCR_STATUS] = 0,
@@ -909,120 +901,38 @@ static const int piix_sidx_map[] = {
[SCR_CONTROL] = 1,
};
-static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg)
+static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
{
- struct ata_port *ap = dev->link->ap;
+ struct ata_port *ap = link->ap;
struct piix_host_priv *hpriv = ap->host->private_data;
- iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg],
+ iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
hpriv->sidpr + PIIX_SIDPR_IDX);
}
-static int piix_sidpr_read(struct ata_device *dev, unsigned int reg)
-{
- struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
-
- piix_sidpr_sel(dev, reg);
- return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
-}
-
-static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
-{
- struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
-
- piix_sidpr_sel(dev, reg);
- iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
-}
-
-static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
-{
- u32 val = 0;
- int i, mi;
-
- for (i = 0, mi = 0; i < 32 / 4; i++) {
- u8 c0 = (val0 >> (i * 4)) & 0xf;
- u8 c1 = (val1 >> (i * 4)) & 0xf;
- u8 merged = c0;
- const int *cur;
-
- /* if no merge preference, assume the first value */
- cur = merge_tbl[mi];
- if (!cur)
- goto done;
- mi++;
-
- /* if two values equal, use it */
- if (c0 == c1)
- goto done;
-
- /* choose the first match or the last from the merge table */
- while (*cur != -1) {
- if (c0 == *cur || c1 == *cur)
- break;
- cur++;
- }
- if (*cur == -1)
- cur--;
- merged = *cur;
- done:
- val |= merged << (i * 4);
- }
-
- return val;
-}
-
-static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
+static int piix_sidpr_scr_read(struct ata_link *link,
+ unsigned int reg, u32 *val)
{
- const int * const sstatus_merge_tbl[] = {
- /* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
- /* SPD */ (const int []){ 2, 1, 0, -1 },
- /* IPM */ (const int []){ 6, 2, 1, 0, -1 },
- NULL,
- };
- const int * const scontrol_merge_tbl[] = {
- /* DET */ (const int []){ 1, 0, 4, 0, -1 },
- /* SPD */ (const int []){ 0, 2, 1, 0, -1 },
- /* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
- NULL,
- };
- u32 v0, v1;
+ struct piix_host_priv *hpriv = link->ap->host->private_data;
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
- if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) {
- *val = piix_sidpr_read(&ap->link.device[0], reg);
- return 0;
- }
-
- v0 = piix_sidpr_read(&ap->link.device[0], reg);
- v1 = piix_sidpr_read(&ap->link.device[1], reg);
-
- switch (reg) {
- case SCR_STATUS:
- *val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
- break;
- case SCR_ERROR:
- *val = v0 | v1;
- break;
- case SCR_CONTROL:
- *val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
- break;
- }
-
+ piix_sidpr_sel(link, reg);
+ *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
return 0;
}
-static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val)
+static int piix_sidpr_scr_write(struct ata_link *link,
+ unsigned int reg, u32 val)
{
+ struct piix_host_priv *hpriv = link->ap->host->private_data;
+
if (reg >= ARRAY_SIZE(piix_sidx_map))
return -EINVAL;
- piix_sidpr_write(&ap->link.device[0], reg, val);
-
- if (ap->flags & ATA_FLAG_SLAVE_POSS)
- piix_sidpr_write(&ap->link.device[1], reg, val);
-
+ piix_sidpr_sel(link, reg);
+ iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
return 0;
}
@@ -1363,28 +1273,28 @@ static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
return map;
}
-static void __devinit piix_init_sidpr(struct ata_host *host)
+static int __devinit piix_init_sidpr(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct piix_host_priv *hpriv = host->private_data;
- struct ata_device *dev0 = &host->ports[0]->link.device[0];
+ struct ata_link *link0 = &host->ports[0]->link;
u32 scontrol;
- int i;
+ int i, rc;
/* check for availability */
for (i = 0; i < 4; i++)
if (hpriv->map[i] == IDE)
- return;
+ return 0;
if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
- return;
+ return 0;
if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
- return;
+ return 0;
if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
- return;
+ return 0;
hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
@@ -1392,7 +1302,7 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
* Give it a test drive by inhibiting power save modes which
* we'll do anyway.
*/
- scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+ piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
/* if IPM is already 3, SCR access is probably working. Don't
* un-inhibit power save modes as BIOS might have inhibited
@@ -1400,18 +1310,30 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
*/
if ((scontrol & 0xf00) != 0x300) {
scontrol |= 0x300;
- piix_sidpr_write(dev0, SCR_CONTROL, scontrol);
- scontrol = piix_sidpr_read(dev0, SCR_CONTROL);
+ piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
+ piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
if ((scontrol & 0xf00) != 0x300) {
dev_printk(KERN_INFO, host->dev, "SCR access via "
"SIDPR is available but doesn't work\n");
- return;
+ return 0;
}
}
- host->ports[0]->ops = &piix_sidpr_sata_ops;
- host->ports[1]->ops = &piix_sidpr_sata_ops;
+ /* okay, SCRs available, set ops and ask libata for slave_link */
+ for (i = 0; i < 2; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ap->ops = &piix_sidpr_sata_ops;
+
+ if (ap->flags & ATA_FLAG_SLAVE_POSS) {
+ rc = ata_slave_link_init(ap);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
}
static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
@@ -1521,7 +1443,9 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
/* initialize controller */
if (port_flags & ATA_FLAG_SATA) {
piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
- piix_init_sidpr(host);
+ rc = piix_init_sidpr(host);
+ if (rc)
+ return rc;
}
/* apply IOCFG bit18 quirk */
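
The rewritten comment above sums up the point of the ata_piix conversion: the BAR5 index/data pair gives one SCR set per device, and instead of merging both sets into one virtual link, each device now gets its own link, with link->pmp (0 for the master link, 1 for the slave link) selecting which set to address. Below is a minimal sketch of that access pattern for a hypothetical controller with a similar index/data window; the demo_* names and the register layout are made up for illustration and are not PIIX's:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/libata.h>

#define DEMO_SIDPR_IDX		0x00	/* index register */
#define DEMO_SIDPR_DATA		0x04	/* data register */

struct demo_host_priv {
	void __iomem *sidpr;		/* index/data window, mapped at probe */
};

static int demo_sidpr_scr_read(struct ata_link *link, unsigned int reg,
			       u32 *val)
{
	struct demo_host_priv *hpriv = link->ap->host->private_data;

	if (reg > SCR_CONTROL)
		return -EINVAL;

	/* port number and link->pmp (0 = master, 1 = slave) pick the SCR set */
	iowrite32(((link->ap->port_no * 2 + link->pmp) << 8) | reg,
		  hpriv->sidpr + DEMO_SIDPR_IDX);
	*val = ioread32(hpriv->sidpr + DEMO_SIDPR_DATA);
	return 0;
}

The write side is symmetrical. The interesting part is that the core, not the driver, decides when these get called with the slave link — see ata_slave_link_init() in the libata-core.c diff below.
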
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 79e3a8e7a84..1ee9499bd34 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -163,6 +163,67 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+/*
+ * Iterator helpers. Don't use directly.
+ *
+ * LOCKING:
+ * Host lock or EH context.
+ */
+struct ata_link *__ata_port_next_link(struct ata_port *ap,
+ struct ata_link *link, bool dev_only)
+{
+ /* NULL link indicates start of iteration */
+ if (!link) {
+ if (dev_only && sata_pmp_attached(ap))
+ return ap->pmp_link;
+ return &ap->link;
+ }
+
+ /* we just iterated over the host master link, what's next? */
+ if (link == &ap->link) {
+ if (!sata_pmp_attached(ap)) {
+ if (unlikely(ap->slave_link) && !dev_only)
+ return ap->slave_link;
+ return NULL;
+ }
+ return ap->pmp_link;
+ }
+
+ /* slave_link excludes PMP */
+ if (unlikely(link == ap->slave_link))
+ return NULL;
+
+ /* iterate to the next PMP link */
+ if (++link < ap->pmp_link + ap->nr_pmp_links)
+ return link;
+ return NULL;
+}
+
+/**
+ * ata_dev_phys_link - find physical link for a device
+ * @dev: ATA device to look up physical link for
+ *
+ * Look up physical link which @dev is attached to. Note that
+ * this is different from @dev->link only when @dev is on slave
+ * link. For all other cases, it's the same as @dev->link.
+ *
+ * LOCKING:
+ * Don't care.
+ *
+ * RETURNS:
+ * Pointer to the found physical link.
+ */
+struct ata_link *ata_dev_phys_link(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ if (!ap->slave_link)
+ return dev->link;
+ if (!dev->devno)
+ return &ap->link;
+ return ap->slave_link;
+}
+
/**
* ata_force_cbl - force cable type according to libata.force
* @ap: ATA port of interest
@@ -206,7 +267,8 @@ void ata_force_cbl(struct ata_port *ap)
* the host link and all fan-out ports connected via PMP. If the
* device part is specified as 0 (e.g. 1.00:), it specifies the
* first fan-out link not the host link. Device number 15 always
- * points to the host link whether PMP is attached or not.
+ * points to the host link whether PMP is attached or not. If the
+ * controller has slave link, device number 16 points to it.
*
* LOCKING:
* EH context.
@@ -214,12 +276,11 @@ void ata_force_cbl(struct ata_port *ap)
static void ata_force_link_limits(struct ata_link *link)
{
bool did_spd = false;
- int linkno, i;
+ int linkno = link->pmp;
+ int i;
if (ata_is_host_link(link))
- linkno = 15;
- else
- linkno = link->pmp;
+ linkno += 15;
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -266,9 +327,9 @@ static void ata_force_xfermask(struct ata_device *dev)
int alt_devno = devno;
int i;
- /* allow n.15 for the first device attached to host port */
- if (ata_is_host_link(dev->link) && devno == 0)
- alt_devno = 15;
+ /* allow n.15/16 for devices attached to host port */
+ if (ata_is_host_link(dev->link))
+ alt_devno += 15;
for (i = ata_force_tbl_size - 1; i >= 0; i--) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -320,9 +381,9 @@ static void ata_force_horkage(struct ata_device *dev)
int alt_devno = devno;
int i;
- /* allow n.15 for the first device attached to host port */
- if (ata_is_host_link(dev->link) && devno == 0)
- alt_devno = 15;
+ /* allow n.15/16 for devices attached to host port */
+ if (ata_is_host_link(dev->link))
+ alt_devno += 15;
for (i = 0; i < ata_force_tbl_size; i++) {
const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -2681,7 +2742,7 @@ static void sata_print_link_status(struct ata_link *link)
return;
sata_scr_read(link, SCR_CONTROL, &scontrol);
- if (ata_link_online(link)) {
+ if (ata_phys_link_online(link)) {
tmp = (sstatus >> 4) & 0xf;
ata_link_printk(link, KERN_INFO,
"SATA link up %s (SStatus %X SControl %X)\n",
@@ -3372,6 +3433,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
int warned = 0;
+ /* Slave readiness can't be tested separately from master. On
+ * M/S emulation configuration, this function should be called
+ * only on the master and it will handle both master and slave.
+ */
+ WARN_ON(link == link->ap->slave_link);
+
if (time_after(nodev_deadline, deadline))
nodev_deadline = deadline;
@@ -3593,7 +3660,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
}
/* no point in trying softreset on offline link */
- if (ata_link_offline(link))
+ if (ata_phys_link_offline(link))
ehc->i.action &= ~ATA_EH_SOFTRESET;
return 0;
@@ -3671,7 +3738,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
if (rc)
goto out;
/* if link is offline nothing more to do */
- if (ata_link_offline(link))
+ if (ata_phys_link_offline(link))
goto out;
/* Link is online. From this point, -ENODEV too is an error. */
@@ -4868,10 +4935,8 @@ int sata_scr_valid(struct ata_link *link)
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
if (ata_is_host_link(link)) {
- struct ata_port *ap = link->ap;
-
if (sata_scr_valid(link))
- return ap->ops->scr_read(ap, reg, val);
+ return link->ap->ops->scr_read(link, reg, val);
return -EOPNOTSUPP;
}
@@ -4897,10 +4962,8 @@ int sata_scr_read(struct ata_link *link, int reg, u32 *val)
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
if (ata_is_host_link(link)) {
- struct ata_port *ap = link->ap;
-
if (sata_scr_valid(link))
- return ap->ops->scr_write(ap, reg, val);
+ return link->ap->ops->scr_write(link, reg, val);
return -EOPNOTSUPP;
}
@@ -4925,13 +4988,12 @@ int sata_scr_write(struct ata_link *link, int reg, u32 val)
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
if (ata_is_host_link(link)) {
- struct ata_port *ap = link->ap;
int rc;
if (sata_scr_valid(link)) {
- rc = ap->ops->scr_write(ap, reg, val);
+ rc = link->ap->ops->scr_write(link, reg, val);
if (rc == 0)
- rc = ap->ops->scr_read(ap, reg, &val);
+ rc = link->ap->ops->scr_read(link, reg, &val);
return rc;
}
return -EOPNOTSUPP;
@@ -4941,7 +5003,7 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
/**
- * ata_link_online - test whether the given link is online
+ * ata_phys_link_online - test whether the given link is online
* @link: ATA link to test
*
* Test whether @link is online. Note that this function returns
@@ -4952,20 +5014,20 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
* None.
*
* RETURNS:
- * 1 if the port online status is available and online.
+ * True if the port online status is available and online.
*/
-int ata_link_online(struct ata_link *link)
+bool ata_phys_link_online(struct ata_link *link)
{
u32 sstatus;
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
(sstatus & 0xf) == 0x3)
- return 1;
- return 0;
+ return true;
+ return false;
}
/**
- * ata_link_offline - test whether the given link is offline
+ * ata_phys_link_offline - test whether the given link is offline
* @link: ATA link to test
*
* Test whether @link is offline. Note that this function
@@ -4976,16 +5038,68 @@ int ata_link_online(struct ata_link *link)
* None.
*
* RETURNS:
- * 1 if the port offline status is available and offline.
+ * True if the port offline status is available and offline.
*/
-int ata_link_offline(struct ata_link *link)
+bool ata_phys_link_offline(struct ata_link *link)
{
u32 sstatus;
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
(sstatus & 0xf) != 0x3)
- return 1;
- return 0;
+ return true;
+ return false;
+}
+
+/**
+ * ata_link_online - test whether the given link is online
+ * @link: ATA link to test
+ *
+ * Test whether @link is online. This is identical to
+ * ata_phys_link_online() when there's no slave link. When
+ * there's a slave link, this function should only be called on
+ * the master link and will return true if any of M/S links is
+ * online.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port online status is available and online.
+ */
+bool ata_link_online(struct ata_link *link)
+{
+ struct ata_link *slave = link->ap->slave_link;
+
+ WARN_ON(link == slave); /* shouldn't be called on slave link */
+
+ return ata_phys_link_online(link) ||
+ (slave && ata_phys_link_online(slave));
+}
+
+/**
+ * ata_link_offline - test whether the given link is offline
+ * @link: ATA link to test
+ *
+ * Test whether @link is offline. This is identical to
+ * ata_phys_link_offline() when there's no slave link. When
+ * there's a slave link, this function should only be called on
+ * the master link and will return true if both M/S links are
+ * offline.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the port offline status is available and offline.
+ */
+bool ata_link_offline(struct ata_link *link)
+{
+ struct ata_link *slave = link->ap->slave_link;
+
+ WARN_ON(link == slave); /* shouldn't be called on slave link */
+
+ return ata_phys_link_offline(link) &&
+ (!slave || ata_phys_link_offline(slave));
}
#ifdef CONFIG_PM
@@ -5127,11 +5241,11 @@ int ata_port_start(struct ata_port *ap)
*/
void ata_dev_init(struct ata_device *dev)
{
- struct ata_link *link = dev->link;
+ struct ata_link *link = ata_dev_phys_link(dev);
struct ata_port *ap = link->ap;
unsigned long flags;
- /* SATA spd limit is bound to the first device */
+ /* SATA spd limit is bound to the attached device, reset together */
link->sata_spd_limit = link->hw_sata_spd_limit;
link->sata_spd = 0;
@@ -5264,6 +5378,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
+ init_completion(&ap->park_req_pending);
init_timer_deferrable(&ap->fastdrain_timer);
ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
ap->fastdrain_timer.data = (unsigned long)ap;
@@ -5294,6 +5409,7 @@ static void ata_host_release(struct device *gendev, void *res)
scsi_host_put(ap->scsi_host);
kfree(ap->pmp_link);
+ kfree(ap->slave_link);
kfree(ap);
host->ports[i] = NULL;
}
@@ -5414,6 +5530,68 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
return host;
}
+/**
+ * ata_slave_link_init - initialize slave link
+ * @ap: port to initialize slave link for
+ *
+ * Create and initialize slave link for @ap. This enables slave
+ * link handling on the port.
+ *
+ * In libata, a port contains links and a link contains devices.
+ * There is single host link but if a PMP is attached to it,
+ * there can be multiple fan-out links. On SATA, there's usually
+ * a single device connected to a link but PATA and SATA
+ * controllers emulating TF based interface can have two - master
+ * and slave.
+ *
+ * However, there are a few controllers which don't fit into this
+ * abstraction too well - SATA controllers which emulate TF
+ * interface with both master and slave devices but also have
+ * separate SCR register sets for each device. These controllers
+ * need separate links for physical link handling
+ * (e.g. onlineness, link speed) but should be treated like a
+ * traditional M/S controller for everything else (e.g. command
+ * issue, softreset).
+ *
+ * slave_link is libata's way of handling this class of
+ * controllers without impacting core layer too much. For
+ * anything other than physical link handling, the default host
+ * link is used for both master and slave. For physical link
+ * handling, separate @ap->slave_link is used. All dirty details
+ * are implemented inside libata core layer. From LLD's POV, the
+ * only difference is that prereset, hardreset and postreset are
+ * called once more for the slave link, so the reset sequence
+ * looks like the following.
+ *
+ * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
+ * softreset(M) -> postreset(M) -> postreset(S)
+ *
+ * Note that softreset is called only for the master. Softreset
+ * resets both M/S by definition, so SRST on master should handle
+ * both (the standard method will work just fine).
+ *
+ * LOCKING:
+ * Should be called before host is registered.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int ata_slave_link_init(struct ata_port *ap)
+{
+ struct ata_link *link;
+
+ WARN_ON(ap->slave_link);
+ WARN_ON(ap->flags & ATA_FLAG_PMP);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ ata_link_init(ap, link, 1);
+ ap->slave_link = link;
+ return 0;
+}
+
static void ata_host_stop(struct device *gendev, void *res)
{
struct ata_host *host = dev_get_drvdata(gendev);
@@ -5640,6 +5818,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* init sata_spd_limit to the current value */
sata_link_init_spd(&ap->link);
+ if (ap->slave_link)
+ sata_link_init_spd(ap->slave_link);
/* print per-port info to dmesg */
xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
@@ -6260,10 +6440,12 @@ EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
+EXPORT_SYMBOL_GPL(__ata_port_next_link);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
+EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
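
The ata_slave_link_init() kernel-doc above describes which controllers want a slave link: TF-based master/slave emulation combined with separate per-device SCRs. From the LLD side the hook-up is small; the sketch below shows the probe-time step, mirroring what piix_init_sidpr() does in the ata_piix.c diff above (the demo_ prefix is hypothetical), and would be called before ata_host_register():

#include <linux/libata.h>

static int demo_init_slave_links(struct ata_host *host)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* PMP and slave links are mutually exclusive */
		if (ap->flags & ATA_FLAG_PMP)
			continue;

		/* only ports that can carry a master/slave pair qualify */
		if (!(ap->flags & ATA_FLAG_SLAVE_POSS))
			continue;

		rc = ata_slave_link_init(ap);
		if (rc)
			return rc;	/* -ENOMEM */
	}
	return 0;
}

After that, the extra prereset/hardreset/postreset calls on the slave link described in the comment are driven entirely by EH (see the libata-eh.c changes that follow); the driver's reset methods simply get invoked once more with the slave link as @link.
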
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2..a93247cc395 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
*/
#include <linux/kernel.h>
+#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -79,6 +80,8 @@ enum {
*/
ATA_EH_PRERESET_TIMEOUT = 10000,
ATA_EH_FASTDRAIN_INTERVAL = 3000,
+
+ ATA_EH_UA_TRIES = 5,
};
/* The following table determines how we sequence resets. Each entry
@@ -457,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
* RETURNS:
* EH_HANDLED or EH_NOT_HANDLED
*/
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ata_port *ap = ata_shost_to_port(host);
unsigned long flags;
struct ata_queued_cmd *qc;
- enum scsi_eh_timer_return ret;
+ enum blk_eh_timer_return ret;
DPRINTK("ENTER\n");
if (ap->ops->error_handler) {
- ret = EH_NOT_HANDLED;
+ ret = BLK_EH_NOT_HANDLED;
goto out;
}
- ret = EH_HANDLED;
+ ret = BLK_EH_HANDLED;
spin_lock_irqsave(ap->lock, flags);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
WARN_ON(qc->scsicmd != cmd);
qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
qc->err_mask |= AC_ERR_TIMEOUT;
- ret = EH_NOT_HANDLED;
+ ret = BLK_EH_NOT_HANDLED;
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -831,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
- scsi_req_abort_cmd(qc->scsicmd);
+ blk_abort_request(qc->scsicmd->request);
}
/**
@@ -1357,6 +1360,37 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
}
/**
+ * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
+ * @dev: target ATAPI device
+ * @r_sense_key: out parameter for sense_key
+ *
+ * Perform ATAPI TEST_UNIT_READY.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask on failure.
+ */
+static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+{
+ u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.command = ATA_CMD_PACKET;
+ tf.protocol = ATAPI_PROT_NODATA;
+
+ err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
+ if (err_mask == AC_ERR_DEV)
+ *r_sense_key = tf.feature >> 4;
+ return err_mask;
+}
+
+/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1756,7 +1790,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
static unsigned int ata_eh_speed_down(struct ata_device *dev,
unsigned int eflags, unsigned int err_mask)
{
- struct ata_link *link = dev->link;
+ struct ata_link *link = ata_dev_phys_link(dev);
int xfer_ok = 0;
unsigned int verdict;
unsigned int action = 0;
@@ -1880,7 +1914,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link)
+ if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ ata_dev_phys_link(qc->dev) != link)
continue;
/* inherit upper level err_mask */
@@ -1967,6 +2002,23 @@ void ata_eh_autopsy(struct ata_port *ap)
ata_port_for_each_link(link, ap)
ata_eh_link_autopsy(link);
+ /* Handle the frigging slave link. Autopsy is done similarly
+ * but actions and flags are transferred over to the master
+ * link and handled from there.
+ */
+ if (ap->slave_link) {
+ struct ata_eh_context *mehc = &ap->link.eh_context;
+ struct ata_eh_context *sehc = &ap->slave_link->eh_context;
+
+ ata_eh_link_autopsy(ap->slave_link);
+
+ ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+ mehc->i.action |= sehc->i.action;
+ mehc->i.dev_action[1] |= sehc->i.dev_action[1];
+ mehc->i.flags |= sehc->i.flags;
+ ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
+ }
+
/* Autopsy of fanout ports can affect host link autopsy.
* Perform host link autopsy last.
*/
@@ -2001,7 +2053,8 @@ static void ata_eh_link_report(struct ata_link *link)
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link ||
+ if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
qc->err_mask == AC_ERR_DEV))
continue;
@@ -2068,7 +2121,7 @@ static void ata_eh_link_report(struct ata_link *link)
char cdb_buf[70] = "";
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
- qc->dev->link != link || !qc->err_mask)
+ ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
continue;
if (qc->dma_dir != DMA_NONE) {
@@ -2160,12 +2213,14 @@ void ata_eh_report(struct ata_port *ap)
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
- unsigned int *classes, unsigned long deadline)
+ unsigned int *classes, unsigned long deadline,
+ bool clear_classes)
{
struct ata_device *dev;
- ata_link_for_each_dev(dev, link)
- classes[dev->devno] = ATA_DEV_UNKNOWN;
+ if (clear_classes)
+ ata_link_for_each_dev(dev, link)
+ classes[dev->devno] = ATA_DEV_UNKNOWN;
return reset(link, classes, deadline);
}
@@ -2187,17 +2242,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
struct ata_port *ap = link->ap;
+ struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_eh_context *sehc = &slave->eh_context;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
int max_tries = 0, try = 0;
+ struct ata_link *failed_link;
struct ata_device *dev;
unsigned long deadline, now;
ata_reset_fn_t reset;
unsigned long flags;
u32 sstatus;
- int nr_known, rc;
+ int nr_unknown, rc;
/*
* Prepare to reset
@@ -2252,8 +2310,30 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
if (prereset) {
- rc = prereset(link,
- ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT));
+ unsigned long deadline = ata_deadline(jiffies,
+ ATA_EH_PRERESET_TIMEOUT);
+
+ if (slave) {
+ sehc->i.action &= ~ATA_EH_RESET;
+ sehc->i.action |= ehc->i.action;
+ }
+
+ rc = prereset(link, deadline);
+
+ /* If present, do prereset on slave link too. Reset
+ * is skipped iff both master and slave links report
+ * -ENOENT or clear ATA_EH_RESET.
+ */
+ if (slave && (rc == 0 || rc == -ENOENT)) {
+ int tmp;
+
+ tmp = prereset(slave, deadline);
+ if (tmp != -ENOENT)
+ rc = tmp;
+
+ ehc->i.action |= sehc->i.action;
+ }
+
if (rc) {
if (rc == -ENOENT) {
ata_link_printk(link, KERN_DEBUG,
@@ -2302,25 +2382,51 @@ int ata_eh_reset(struct ata_link *link, int classify,
else
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
- rc = ata_do_reset(link, reset, classes, deadline);
- if (rc && rc != -EAGAIN)
+ rc = ata_do_reset(link, reset, classes, deadline, true);
+ if (rc && rc != -EAGAIN) {
+ failed_link = link;
goto fail;
+ }
+
+ /* hardreset slave link if existent */
+ if (slave && reset == hardreset) {
+ int tmp;
+
+ if (verbose)
+ ata_link_printk(slave, KERN_INFO,
+ "hard resetting link\n");
+ ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+ tmp = ata_do_reset(slave, reset, classes, deadline,
+ false);
+ switch (tmp) {
+ case -EAGAIN:
+ rc = -EAGAIN;
+ case 0:
+ break;
+ default:
+ failed_link = slave;
+ rc = tmp;
+ goto fail;
+ }
+ }
+
+ /* perform follow-up SRST if necessary */
if (reset == hardreset &&
ata_eh_followup_srst_needed(link, rc, classes)) {
- /* okay, let's do follow-up softreset */
reset = softreset;
if (!reset) {
ata_link_printk(link, KERN_ERR,
"follow-up softreset required "
"but no softreset avaliable\n");
+ failed_link = link;
rc = -EINVAL;
goto fail;
}
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
- rc = ata_do_reset(link, reset, classes, deadline);
+ rc = ata_do_reset(link, reset, classes, deadline, true);
}
} else {
if (verbose)
@@ -2341,7 +2447,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
dev->pio_mode = XFER_PIO_0;
dev->flags &= ~ATA_DFLAG_SLEEPING;
- if (ata_link_offline(link))
+ if (ata_phys_link_offline(ata_dev_phys_link(dev)))
continue;
/* apply class override */
@@ -2354,6 +2460,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* record current link speed */
if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
link->sata_spd = (sstatus >> 4) & 0xf;
+ if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
+ slave->sata_spd = (sstatus >> 4) & 0xf;
/* thaw the port */
if (ata_is_host_link(link))
@@ -2366,12 +2474,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
* reset and here. This race is mediated by cross checking
* link onlineness and classification result later.
*/
- if (postreset)
+ if (postreset) {
postreset(link, classes);
+ if (slave)
+ postreset(slave, classes);
+ }
/* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
link->eh_info.serror = 0;
+ if (slave)
+ slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
/* Make sure onlineness and classification result correspond.
@@ -2381,19 +2494,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
* link onlineness and classification result, those conditions
* can be reliably detected and retried.
*/
- nr_known = 0;
+ nr_unknown = 0;
ata_link_for_each_dev(dev, link) {
/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
- if (classes[dev->devno] == ATA_DEV_UNKNOWN)
+ if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
classes[dev->devno] = ATA_DEV_NONE;
- else
- nr_known++;
+ if (ata_phys_link_online(ata_dev_phys_link(dev)))
+ nr_unknown++;
+ }
}
- if (classify && !nr_known && ata_link_online(link)) {
+ if (classify && nr_unknown) {
if (try < max_tries) {
ata_link_printk(link, KERN_WARNING, "link online but "
"device misclassified, retrying\n");
+ failed_link = link;
rc = -EAGAIN;
goto fail;
}
@@ -2404,6 +2519,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* reset successful, schedule revalidation */
ata_eh_done(link, NULL, ATA_EH_RESET);
+ if (slave)
+ ata_eh_done(slave, NULL, ATA_EH_RESET);
ehc->last_reset = jiffies;
ehc->i.action |= ATA_EH_REVALIDATE;
@@ -2411,6 +2528,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
out:
/* clear hotplug flag */
ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
+ if (slave)
+ sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_RESETTING;
@@ -2431,7 +2550,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (time_before(now, deadline)) {
unsigned long delta = deadline - now;
- ata_link_printk(link, KERN_WARNING,
+ ata_link_printk(failed_link, KERN_WARNING,
"reset failed (errno=%d), retrying in %u secs\n",
rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
@@ -2439,13 +2558,92 @@ int ata_eh_reset(struct ata_link *link, int classify,
delta = schedule_timeout_uninterruptible(delta);
}
- if (rc == -EPIPE || try == max_tries - 1)
+ if (try == max_tries - 1) {
sata_down_spd_limit(link);
+ if (slave)
+ sata_down_spd_limit(slave);
+ } else if (rc == -EPIPE)
+ sata_down_spd_limit(failed_link);
+
if (hardreset)
reset = hardreset;
goto retry;
}
+static inline void ata_eh_pull_park_action(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags;
+
+ /*
+ * This function can be thought of as an extended version of
+ * ata_eh_about_to_do() specially crafted to accommodate the
+ * requirements of ATA_EH_PARK handling. Since the EH thread
+ * does not leave the do {} while () loop in ata_eh_recover as
+ * long as the timeout for a park request to *one* device on
+ * the port has not expired, and since we still want to pick
+ * up park requests to other devices on the same port or
+ * timeout updates for the same device, we have to pull
+ * ATA_EH_PARK actions from eh_info into eh_context.i
+ * ourselves at the beginning of each pass over the loop.
+ *
+ * Additionally, all write accesses to &ap->park_req_pending
+ * through INIT_COMPLETION() (see below) or complete_all()
+ * (see ata_scsi_park_store()) are protected by the host lock.
+ * As a result we have that park_req_pending.done is zero on
+ * exit from this function, i.e. when ATA_EH_PARK actions for
+ * *all* devices on port ap have been pulled into the
+ * respective eh_context structs. If, and only if,
+ * park_req_pending.done is non-zero by the time we reach
+ * wait_for_completion_timeout(), another ATA_EH_PARK action
+ * has been scheduled for at least one of the devices on port
+ * ap and we have to cycle over the do {} while () loop in
+ * ata_eh_recover() again.
+ */
+
+ spin_lock_irqsave(ap->lock, flags);
+ INIT_COMPLETION(ap->park_req_pending);
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct ata_eh_info *ehi = &link->eh_info;
+
+ link->eh_context.i.dev_action[dev->devno] |=
+ ehi->dev_action[dev->devno] & ATA_EH_PARK;
+ ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
+ }
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
+{
+ struct ata_eh_context *ehc = &dev->link->eh_context;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ if (park) {
+ ehc->unloaded_mask |= 1 << dev->devno;
+ tf.command = ATA_CMD_IDLEIMMEDIATE;
+ tf.feature = 0x44;
+ tf.lbal = 0x4c;
+ tf.lbam = 0x4e;
+ tf.lbah = 0x55;
+ } else {
+ ehc->unloaded_mask &= ~(1 << dev->devno);
+ tf.command = ATA_CMD_CHK_POWER;
+ }
+
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol |= ATA_PROT_NODATA;
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (park && (err_mask || tf.lbal != 0xc4)) {
+ ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+ ehc->unloaded_mask &= ~(1 << dev->devno);
+ }
+}
+
static int ata_eh_revalidate_and_attach(struct ata_link *link,
struct ata_device **r_failed_dev)
{
@@ -2472,7 +2670,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
- if (ata_link_offline(link)) {
+ if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
@@ -2610,6 +2808,53 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
return rc;
}
+/**
+ * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
+ * @dev: ATAPI device to clear UA for
+ *
+ * Resets and other operations can make an ATAPI device raise
+ *	UNIT ATTENTION, which causes the next operation to fail.  This
+ * function clears UA.
+ *
+ * LOCKING:
+ * EH context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int atapi_eh_clear_ua(struct ata_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ATA_EH_UA_TRIES; i++) {
+ u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ u8 sense_key = 0;
+ unsigned int err_mask;
+
+ err_mask = atapi_eh_tur(dev, &sense_key);
+ if (err_mask != 0 && err_mask != AC_ERR_DEV) {
+ ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
+ "failed (err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ if (!err_mask || sense_key != UNIT_ATTENTION)
+ return 0;
+
+ err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+ "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+ }
+
+ ata_dev_printk(dev, KERN_WARNING,
+ "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+
+ return 0;
+}
+
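
The UA-clearing loop above boils down to: issue TEST UNIT READY, and if the device answers with sense key UNIT ATTENTION, pull the sense data and retry. A minimal userspace sketch of the same probe through SG_IO follows; it is not part of this patch, the device path is an assumption, and fixed-format sense data is assumed.

/*
 * Userspace sketch only: probe a device with TEST UNIT READY via SG_IO
 * and report whether the returned sense data carries UNIT ATTENTION,
 * mirroring the check atapi_eh_clear_ua() performs in EH context.
 */
#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/sr0";	/* assumed path */
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };		/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr io;
	int fd = open(path, O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	memset(&io, 0, sizeof(io));
	memset(sense, 0, sizeof(sense));
	io.interface_id = 'S';
	io.cmdp = cdb;
	io.cmd_len = sizeof(cdb);
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.dxfer_direction = SG_DXFER_NONE;
	io.timeout = 5000;					/* ms */

	if (ioctl(fd, SG_IO, &io) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}
	/* fixed-format sense assumed: sense key lives in byte 2, low nibble */
	if (io.sb_len_wr >= 3 && (sense[2] & 0x0f) == 0x06)
		printf("%s: UNIT ATTENTION pending (sense key 0x6)\n", path);
	else
		printf("%s: no UNIT ATTENTION reported\n", path);
	close(fd);
	return 0;
}
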
static int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -2697,7 +2942,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/* This is the last chance, better to slow
* down than lose it.
*/
- sata_down_spd_limit(dev->link);
+ sata_down_spd_limit(ata_dev_phys_link(dev));
ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
}
}
@@ -2707,7 +2952,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
ata_dev_disable(dev);
/* detach if offline */
- if (ata_link_offline(dev->link))
+ if (ata_phys_link_offline(ata_dev_phys_link(dev)))
ata_eh_detach_dev(dev);
/* schedule probe if necessary */
@@ -2755,7 +3000,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
struct ata_device *dev;
int nr_failed_devs;
int rc;
- unsigned long flags;
+ unsigned long flags, deadline;
DPRINTK("ENTER\n");
@@ -2829,6 +3074,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
}
}
+ do {
+ unsigned long now;
+
+ /*
+ * clears ATA_EH_PARK in eh_info and resets
+ * ap->park_req_pending
+ */
+ ata_eh_pull_park_action(ap);
+
+ deadline = jiffies;
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ unsigned long tmp;
+
+ if (dev->class != ATA_DEV_ATA)
+ continue;
+ if (!(ehc->i.dev_action[dev->devno] &
+ ATA_EH_PARK))
+ continue;
+ tmp = dev->unpark_deadline;
+ if (time_before(deadline, tmp))
+ deadline = tmp;
+ else if (time_before_eq(tmp, jiffies))
+ continue;
+ if (ehc->unloaded_mask & (1 << dev->devno))
+ continue;
+
+ ata_eh_park_issue_cmd(dev, 1);
+ }
+ }
+
+ now = jiffies;
+ if (time_before_eq(deadline, now))
+ break;
+
+ deadline = wait_for_completion_timeout(&ap->park_req_pending,
+ deadline - now);
+ } while (deadline);
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ if (!(link->eh_context.unloaded_mask &
+ (1 << dev->devno)))
+ continue;
+
+ ata_eh_park_issue_cmd(dev, 0);
+ ata_eh_done(link, dev, ATA_EH_PARK);
+ }
+ }
+
/* the rest */
ata_port_for_each_link(link, ap) {
struct ata_eh_context *ehc = &link->eh_context;
@@ -2852,6 +3147,20 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
ehc->i.flags &= ~ATA_EHI_SETMODE;
}
+ /* If reset has been issued, clear UA to avoid
+ * disrupting the current users of the device.
+ */
+ if (ehc->i.flags & ATA_EHI_DID_RESET) {
+ ata_link_for_each_dev(dev, link) {
+ if (dev->class != ATA_DEV_ATAPI)
+ continue;
+ rc = atapi_eh_clear_ua(dev);
+ if (rc)
+ goto dev_fail;
+ }
+ }
+
+ /* configure link power saving */
if (ehc->i.action & ATA_EH_LPM)
ata_link_for_each_dev(dev, link)
ata_dev_enable_pm(dev, ap->pm_policy);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9d3ba423cb..59fe051957e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -183,6 +183,105 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
ata_scsi_lpm_show, ata_scsi_lpm_put);
EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
+static ssize_t ata_scsi_park_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_link *link;
+ struct ata_device *dev;
+ unsigned long flags;
+ unsigned int uninitialized_var(msecs);
+ int rc = 0;
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (!dev) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ link = dev->link;
+ if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
+ link->eh_context.unloaded_mask & (1 << dev->devno) &&
+ time_after(dev->unpark_deadline, jiffies))
+ msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies);
+ else
+ msecs = 0;
+
+unlock:
+	spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+}
+
+static ssize_t ata_scsi_park_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct scsi_device *sdev = to_scsi_device(device);
+ struct ata_port *ap;
+ struct ata_device *dev;
+ long int input;
+ unsigned long flags;
+ int rc;
+
+ rc = strict_strtol(buf, 10, &input);
+ if (rc || input < -2)
+ return -EINVAL;
+ if (input > ATA_TMOUT_MAX_PARK) {
+ rc = -EOVERFLOW;
+ input = ATA_TMOUT_MAX_PARK;
+ }
+
+ ap = ata_shost_to_port(sdev->host);
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev = ata_scsi_find_dev(ap, sdev);
+ if (unlikely(!dev)) {
+ rc = -ENODEV;
+ goto unlock;
+ }
+ if (dev->class != ATA_DEV_ATA) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (input >= 0) {
+ if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
+ rc = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ dev->unpark_deadline = ata_deadline(jiffies, input);
+ dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
+ ata_port_schedule_eh(ap);
+ complete(&ap->park_req_pending);
+ } else {
+ switch (input) {
+ case -1:
+ dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
+ break;
+ case -2:
+ dev->flags |= ATA_DFLAG_NO_UNLOAD;
+ break;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return rc ? rc : len;
+}
+DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
+ ata_scsi_park_show, ata_scsi_park_store);
+EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+
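
For reference, the contract implemented by the two functions above: writing a positive number parks the heads for that many milliseconds (values above ATA_TMOUT_MAX_PARK are clamped and the write reports EOVERFLOW), writing -1 or -2 clears or sets ATA_DFLAG_NO_UNLOAD, and reading returns the milliseconds of park time remaining. A userspace sketch, under the assumption that the attribute surfaces at /sys/block/<disk>/device/unload_heads:

/*
 * Userspace sketch only: ask libata to park one disk's heads for two
 * seconds via the new unload_heads attribute, then read back the
 * remaining park time.  The sysfs path is an assumption (the attribute
 * hangs off the SCSI device node of the disk).
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *disk = argc > 1 ? argv[1] : "sda";
	char path[128], buf[32];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/device/unload_heads", disk);

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "2000\n");			/* park for 2000 ms */
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f)) {
		buf[strcspn(buf, "\n")] = '\0';
		printf("%s: %s ms of park time remaining\n", disk, buf);
	}
	fclose(f);
	return 0;
}
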
static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
@@ -269,6 +368,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
ata_scsi_activity_store);
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
+struct device_attribute *ata_common_sdev_attrs[] = {
+ &dev_attr_unload_heads,
+ NULL
+};
+EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
+
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
@@ -954,6 +1059,9 @@ static int atapi_drain_needed(struct request *rq)
static int ata_scsi_dev_config(struct scsi_device *sdev,
struct ata_device *dev)
{
+ if (!ata_id_has_unload(dev->id))
+ dev->flags |= ATA_DFLAG_NO_UNLOAD;
+
/* configure max sectors */
blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
@@ -977,6 +1085,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
+ if (ata_id_is_ssd(dev->id))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
+ sdev->request_queue);
+
/* ATA devices must be sector aligned */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_SECT_SIZE - 1);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ade5c75b614..fe2839e5877 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -70,6 +70,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
+extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
@@ -107,6 +108,8 @@ extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
extern int atapi_check_dma(struct ata_queued_cmd *qc);
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
+extern bool ata_phys_link_online(struct ata_link *link);
+extern bool ata_phys_link_offline(struct ata_link *link);
extern void ata_dev_init(struct ata_device *dev);
extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
extern int sata_link_init_spd(struct ata_link *link);
@@ -152,7 +155,7 @@ extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
-extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
extern void ata_eh_fastdrain_timerfn(unsigned long arg);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d3932901a3b..1266924c11f 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1632,6 +1632,8 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
return -ENODEV;
}
+ dev_set_drvdata(&pdev->dev, host);
+
return 0;
}
@@ -1648,6 +1650,7 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
+ dev_set_drvdata(&pdev->dev, NULL);
peripheral_free_list(atapi_io_port);
@@ -1655,27 +1658,44 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
+static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
{
- return 0;
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ if (host)
+ return ata_host_suspend(host, state);
+ else
+ return 0;
}
-int bfin_atapi_resume(struct platform_device *pdev)
+static int bfin_atapi_resume(struct platform_device *pdev)
{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (host) {
+ ret = bfin_reset_controller(host);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return ret;
+ }
+ ata_host_resume(host);
+ }
+
return 0;
}
+#else
+#define bfin_atapi_suspend NULL
+#define bfin_atapi_resume NULL
#endif
static struct platform_driver bfin_atapi_driver = {
.probe = bfin_atapi_probe,
.remove = __devexit_p(bfin_atapi_remove),
+ .suspend = bfin_atapi_suspend,
+ .resume = bfin_atapi_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .suspend = bfin_atapi_suspend,
- .resume = bfin_atapi_resume,
-#endif
},
};
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index e970b227fbc..a598bb36aaf 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -230,7 +230,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
tmpbyte & 1, tmpbyte & 0x30);
*try_mmio = 0;
-#ifdef CONFIG_PPC_MERGE
+#ifdef CONFIG_PPC
if (machine_is(cell))
*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
#endif
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 3924e7209a4..1a56db92ff7 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -469,10 +469,10 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
return true;
}
-static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
- u32 val)
+static int sata_fsl_scr_write(struct ata_link *link,
+ unsigned int sc_reg_in, u32 val)
{
- struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
void __iomem *ssr_base = host_priv->ssr_base;
unsigned int sc_reg;
@@ -493,10 +493,10 @@ static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
return 0;
}
-static int sata_fsl_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
- u32 *val)
+static int sata_fsl_scr_read(struct ata_link *link,
+ unsigned int sc_reg_in, u32 *val)
{
- struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+ struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
void __iomem *ssr_base = host_priv->ssr_base;
unsigned int sc_reg;
@@ -645,12 +645,12 @@ static int sata_fsl_port_start(struct ata_port *ap)
* Workaround for 8315DS board 3gbps link-up issue,
* currently limit SATA port to GEN1 speed
*/
- sata_fsl_scr_read(ap, SCR_CONTROL, &temp);
+ sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
temp &= ~(0xF << 4);
temp |= (0x1 << 4);
- sata_fsl_scr_write(ap, SCR_CONTROL, temp);
+ sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
- sata_fsl_scr_read(ap, SCR_CONTROL, &temp);
+ sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
temp);
#endif
@@ -868,7 +868,7 @@ issue_srst:
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
- sata_fsl_scr_read(ap, SCR_ERROR, &Serror);
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
@@ -972,9 +972,9 @@ static void sata_fsl_error_intr(struct ata_port *ap)
* Handle & Clear SError
*/
- sata_fsl_scr_read(ap, SCR_ERROR, &SError);
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
if (unlikely(SError & 0xFFFF0000)) {
- sata_fsl_scr_write(ap, SCR_ERROR, SError);
+ sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
}
DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
@@ -1091,7 +1091,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
hstatus = ioread32(hcr_base + HSTATUS);
- sata_fsl_scr_read(ap, SCR_ERROR, &SError);
+ sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
if (unlikely(SError & 0xFFFF0000)) {
DPRINTK("serror @host_intr : 0x%x\n", SError);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 5032c32fa50..fbbd87c96f1 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -269,9 +269,9 @@ static void inic_reset_port(void __iomem *port_base)
writeb(0xff, port_base + PORT_IRQ_STAT);
}
-static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
+static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
- void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
+ void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -286,9 +286,9 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
return 0;
}
-static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
- void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
+ void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
return -EINVAL;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c815f8ecf6e..2b24ae58b52 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -493,10 +493,10 @@ struct mv_hw_ops {
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
-static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
-static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
-static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
@@ -1070,23 +1070,23 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
return ofs;
}
-static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
+static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
- *val = readl(mv_ap_base(ap) + ofs);
+ *val = readl(mv_ap_base(link->ap) + ofs);
return 0;
} else
return -EINVAL;
}
-static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
- writelfl(val, mv_ap_base(ap) + ofs);
+ writelfl(val, mv_ap_base(link->ap) + ofs);
return 0;
} else
return -EINVAL;
@@ -2251,11 +2251,11 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
return ofs;
}
-static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
+static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
- struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
- void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
+ void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
@@ -2265,11 +2265,11 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
return -EINVAL;
}
-static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
- struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
void __iomem *mmio = hpriv->base;
- void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
+ void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU) {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 1e1f3f3757a..fae3841de0d 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -302,13 +302,15 @@ static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
-static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -403,28 +405,45 @@ static struct scsi_host_template nv_swncq_sht = {
.slave_configure = nv_swncq_slave_config,
};
-static struct ata_port_operations nv_generic_ops = {
+/* OSDL bz3352 reports that some nv controllers can't determine device
+ * signature reliably and nv_hardreset is implemented to work around
+ * the problem. This was reported on nf3 and it's unclear whether any
+ * other controllers are affected. However, the workaround has been
+ * applied to all variants and there isn't much to gain by trying to
+ * find out exactly which ones are affected at this point, especially
+ * because NV has moved over to ahci for newer controllers.
+ */
+static struct ata_port_operations nv_common_ops = {
.inherits = &ata_bmdma_port_ops,
- .hardreset = ATA_OP_NULL,
+ .hardreset = nv_hardreset,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
};
+/* OSDL bz11195 reports that the link doesn't come online after hardreset
+ * on generic nv's, and there have been several other similar reports
+ * on linux-ide. Disable hardreset for generic nv's.
+ */
+static struct ata_port_operations nv_generic_ops = {
+ .inherits = &nv_common_ops,
+ .hardreset = ATA_OP_NULL,
+};
+
static struct ata_port_operations nv_nf2_ops = {
- .inherits = &nv_generic_ops,
+ .inherits = &nv_common_ops,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
};
static struct ata_port_operations nv_ck804_ops = {
- .inherits = &nv_generic_ops,
+ .inherits = &nv_common_ops,
.freeze = nv_ck804_freeze,
.thaw = nv_ck804_thaw,
.host_stop = nv_ck804_host_stop,
};
static struct ata_port_operations nv_adma_ops = {
- .inherits = &nv_generic_ops,
+ .inherits = &nv_common_ops,
.check_atapi_dma = nv_adma_check_atapi_dma,
.sff_tf_read = nv_adma_tf_read,
@@ -448,7 +467,7 @@ static struct ata_port_operations nv_adma_ops = {
};
static struct ata_port_operations nv_swncq_ops = {
- .inherits = &nv_generic_ops,
+ .inherits = &nv_common_ops,
.qc_defer = ata_std_qc_defer,
.qc_prep = nv_swncq_qc_prep,
@@ -1492,21 +1511,21 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
return ret;
}
-static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
-static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
@@ -1586,6 +1605,21 @@ static void nv_mcp55_thaw(struct ata_port *ap)
ata_sff_thaw(ap);
}
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ int rc;
+
+	/* SATA hardreset fails to retrieve the proper device signature on
+	 * some controllers.  Request a follow-up SRST.  For more info,
+ * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
+ */
+ rc = sata_sff_hardreset(link, class, deadline);
+ if (rc)
+ return rc;
+ return -EAGAIN;
+}
+
static void nv_adma_error_handler(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
@@ -2184,9 +2218,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
if (!pp->qc_active)
return;
- if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
+ if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
return;
- ap->ops->scr_write(ap, SCR_ERROR, serror);
+ ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
if (ata_stat & ATA_ERR) {
ata_ehi_clear_desc(ehi);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 030665ba76b..750d8cdc00c 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -137,8 +137,8 @@ struct pdc_port_priv {
dma_addr_t pkt_dma;
};
-static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
@@ -386,19 +386,21 @@ static int pdc_sata_cable_detect(struct ata_port *ap)
return ATA_CBL_SATA;
}
-static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int pdc_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
-static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int pdc_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
@@ -731,7 +733,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
if (sata_scr_valid(&ap->link)) {
u32 serror;
- pdc_sata_scr_read(ap, SCR_ERROR, &serror);
+ pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
ehi->serror |= serror;
}
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 1600107047c..a000c86ac85 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -111,8 +111,8 @@ struct qs_port_priv {
qs_state_t state;
};
-static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
@@ -242,11 +242,11 @@ static int qs_prereset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
-static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8));
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
@@ -256,11 +256,11 @@ static void qs_error_handler(struct ata_port *ap)
ata_std_error_handler(ap);
}
-static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 88bf4212590..031d7b7dee3 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -115,8 +115,8 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
-static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
@@ -317,9 +317,9 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap,
return NULL;
}
-static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
- void __iomem *mmio = sil_scr_addr(ap, sc_reg);
+ void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
if (mmio) {
*val = readl(mmio);
@@ -328,9 +328,9 @@ static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
return -EINVAL;
}
-static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
- void __iomem *mmio = sil_scr_addr(ap, sc_reg);
+ void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
if (mmio) {
writel(val, mmio);
@@ -352,8 +352,8 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
* controllers continue to assert IRQ as long as
* SError bits are pending. Clear SError immediately.
*/
- sil_scr_read(ap, SCR_ERROR, &serror);
- sil_scr_write(ap, SCR_ERROR, serror);
+ sil_scr_read(&ap->link, SCR_ERROR, &serror);
+ sil_scr_write(&ap->link, SCR_ERROR, serror);
/* Sometimes spurious interrupts occur, double check
* it's PHYRDY CHG.
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 84ffcc26a74..4621807a1a6 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -340,8 +340,8 @@ struct sil24_port_priv {
};
static void sil24_dev_config(struct ata_device *dev);
-static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val);
-static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
@@ -504,9 +504,9 @@ static int sil24_scr_map[] = {
[SCR_ACTIVE] = 3,
};
-static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
+static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
- void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL;
+ void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
@@ -517,9 +517,9 @@ static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
return -EINVAL;
}
-static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
- void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL;
+ void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 1010b3069bd..9c43b4e7c4a 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -64,8 +64,8 @@ enum {
};
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static const struct pci_device_id sis_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
@@ -134,10 +134,11 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
return addr;
}
-static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static u32 sis_scr_cfg_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
u32 val2 = 0;
u8 pmr;
@@ -158,10 +159,11 @@ static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
return 0;
}
-static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sis_scr_cfg_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
u8 pmr;
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -178,8 +180,9 @@ static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
return 0;
}
-static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
+ struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 pmr;
@@ -187,7 +190,7 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
return -EINVAL;
if (ap->flags & SIS_FLAG_CFGSCR)
- return sis_scr_cfg_read(ap, sc_reg, val);
+ return sis_scr_cfg_read(link, sc_reg, val);
pci_read_config_byte(pdev, SIS_PMR, &pmr);
@@ -202,8 +205,9 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
return 0;
}
-static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
+ struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 pmr;
@@ -213,7 +217,7 @@ static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
pci_read_config_byte(pdev, SIS_PMR, &pmr);
if (ap->flags & SIS_FLAG_CFGSCR)
- return sis_scr_cfg_write(ap, sc_reg, val);
+ return sis_scr_cfg_write(link, sc_reg, val);
else {
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index fb13b82aacb..609d147813a 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -123,20 +123,22 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
}
}
-static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int k2_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
-static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int k2_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index db529b84994..019575bb3e0 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -57,8 +57,8 @@ struct uli_priv {
};
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static const struct pci_device_id uli_pci_tbl[] = {
{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -107,39 +107,39 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
-static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg)
+static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
u32 val;
pci_read_config_dword(pdev, cfg_addr, &val);
return val;
}
-static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val)
+static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
+ struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+ unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
pci_write_config_dword(pdev, cfg_addr, val);
}
-static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = uli_scr_cfg_read(ap, sc_reg);
+ *val = uli_scr_cfg_read(link, sc_reg);
return 0;
}
-static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
return -EINVAL;
- uli_scr_cfg_write(ap, sc_reg, val);
+ uli_scr_cfg_write(link, sc_reg, val);
return 0;
}
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 96deeb354e1..1cfa74535d9 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -68,8 +68,8 @@ enum {
};
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -152,19 +152,19 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
+ *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
return 0;
}
-static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
+ iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
return 0;
}
@@ -210,20 +210,20 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
goto skip_scr;
/* Resume phy. This is the old SATA resume sequence */
- svia_scr_write(ap, SCR_CONTROL, 0x300);
- svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */
+ svia_scr_write(link, SCR_CONTROL, 0x300);
+ svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
/* wait for phy to become ready, if necessary */
do {
msleep(200);
- svia_scr_read(ap, SCR_STATUS, &sstatus);
+ svia_scr_read(link, SCR_STATUS, &sstatus);
if ((sstatus & 0xf) != 1)
break;
} while (time_before(jiffies, timeout));
/* open code sata_print_link_status() */
- svia_scr_read(ap, SCR_STATUS, &sstatus);
- svia_scr_read(ap, SCR_CONTROL, &scontrol);
+ svia_scr_read(link, SCR_STATUS, &sstatus);
+ svia_scr_read(link, SCR_CONTROL, &scontrol);
online = (sstatus & 0xf) == 0x3;
@@ -232,7 +232,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
online ? "up" : "down", sstatus, scontrol);
/* SStatus is read one more time */
- svia_scr_read(ap, SCR_STATUS, &sstatus);
+ svia_scr_read(link, SCR_STATUS, &sstatus);
if (!online) {
/* tell EH to bail */
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index f3d635c0a2e..c57cdff9e6b 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -98,20 +98,22 @@ enum {
VSC_SATA_INT_PHY_CHANGE),
};
-static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int vsc_sata_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
-static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int vsc_sata_scr_write(struct ata_link *link,
+ unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
return 0;
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 31dc0cd84af..0a5f055dffb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -54,7 +54,7 @@ struct driver_private {
*/
struct class_private {
struct kset class_subsys;
- struct list_head class_devices;
+ struct klist class_devices;
struct list_head class_interfaces;
struct kset class_dirs;
struct mutex class_mutex;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index cc5e28c8885..eb85e431230 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -135,6 +135,20 @@ static void remove_class_attrs(struct class *cls)
}
}
+static void klist_class_dev_get(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_class);
+
+ get_device(dev);
+}
+
+static void klist_class_dev_put(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_class);
+
+ put_device(dev);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct class_private *cp;
@@ -145,7 +159,7 @@ int __class_register(struct class *cls, struct lock_class_key *key)
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
- INIT_LIST_HEAD(&cp->class_devices);
+ klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
INIT_LIST_HEAD(&cp->class_interfaces);
kset_init(&cp->class_dirs);
__mutex_init(&cp->class_mutex, "struct class mutex", key);
@@ -269,6 +283,71 @@ char *make_class_name(const char *name, struct kobject *kobj)
#endif
/**
+ * class_dev_iter_init - initialize class device iterator
+ * @iter: class iterator to initialize
+ * @class: the class we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize class iterator @iter such that it iterates over devices
+ * of @class. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->knode_class;
+ klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_init);
+
+/**
+ * class_dev_iter_next - iterate to the next device
+ * @iter: class iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator is advanced to the next device or exited.  The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into class code.
+ */
+struct device *class_dev_iter_next(struct class_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ while (1) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = container_of(knode, struct device, knode_class);
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_next);
+
+/**
+ * class_dev_iter_exit - finish iteration
+ * @iter: class iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete, whether the iteration ran to the end or not.
+ */
+void class_dev_iter_exit(struct class_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_exit);
+
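
The three helpers above are the whole public surface of the new iterator. A minimal sketch of the calling convention they establish (kernel context and <linux/device.h> assumed; demo_walk_class and demo_class are hypothetical names, and class_for_each_device() below is the first in-tree user of the same pattern):

/*
 * Sketch only: walk every device of a class with the new iterator.
 * The returned device stays pinned until the next class_dev_iter_next()
 * call or class_dev_iter_exit(), so the loop body may sleep or call
 * back into class code.
 */
static void demo_walk_class(struct class *demo_class)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, demo_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "visited by iterator\n");
	class_dev_iter_exit(&iter);
}
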
+/**
* class_for_each_device - device iterator
* @class: the class we're iterating
* @start: the device to start with in the list, if any.
@@ -283,13 +362,13 @@ char *make_class_name(const char *name, struct kobject *kobj)
* We check the return of @fn each time. If it returns anything
* other than 0, we break out and return that value.
*
- * Note, we hold class->class_mutex in this function, so it can not be
- * re-acquired in @fn, otherwise it will self-deadlocking. For
- * example, calls to add or remove class members would be verboten.
+ * @fn is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
*/
int class_for_each_device(struct class *class, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
+ struct class_dev_iter iter;
struct device *dev;
int error = 0;
@@ -301,20 +380,13 @@ int class_for_each_device(struct class *class, struct device *start,
return -EINVAL;
}
- mutex_lock(&class->p->class_mutex);
- list_for_each_entry(dev, &class->p->class_devices, node) {
- if (start) {
- if (start == dev)
- start = NULL;
- continue;
- }
- dev = get_device(dev);
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
error = fn(dev, data);
- put_device(dev);
if (error)
break;
}
- mutex_unlock(&class->p->class_mutex);
+ class_dev_iter_exit(&iter);
return error;
}
@@ -337,16 +409,15 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
*
* Note, you will need to drop the reference with put_device() after use.
*
- * We hold class->class_mutex in this function, so it can not be
- * re-acquired in @match, otherwise it will self-deadlocking. For
- * example, calls to add or remove class members would be verboten.
+ * @match is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
void *data,
int (*match)(struct device *, void *))
{
+ struct class_dev_iter iter;
struct device *dev;
- int found = 0;
if (!class)
return NULL;
@@ -356,29 +427,23 @@ struct device *class_find_device(struct class *class, struct device *start,
return NULL;
}
- mutex_lock(&class->p->class_mutex);
- list_for_each_entry(dev, &class->p->class_devices, node) {
- if (start) {
- if (start == dev)
- start = NULL;
- continue;
- }
- dev = get_device(dev);
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
if (match(dev, data)) {
- found = 1;
+ get_device(dev);
break;
- } else
- put_device(dev);
+ }
}
- mutex_unlock(&class->p->class_mutex);
+ class_dev_iter_exit(&iter);
- return found ? dev : NULL;
+ return dev;
}
EXPORT_SYMBOL_GPL(class_find_device);
int class_interface_register(struct class_interface *class_intf)
{
struct class *parent;
+ struct class_dev_iter iter;
struct device *dev;
if (!class_intf || !class_intf->class)
@@ -391,8 +456,10 @@ int class_interface_register(struct class_interface *class_intf)
mutex_lock(&parent->p->class_mutex);
list_add_tail(&class_intf->node, &parent->p->class_interfaces);
if (class_intf->add_dev) {
- list_for_each_entry(dev, &parent->p->class_devices, node)
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
}
mutex_unlock(&parent->p->class_mutex);
@@ -402,6 +469,7 @@ int class_interface_register(struct class_interface *class_intf)
void class_interface_unregister(struct class_interface *class_intf)
{
struct class *parent = class_intf->class;
+ struct class_dev_iter iter;
struct device *dev;
if (!parent)
@@ -410,8 +478,10 @@ void class_interface_unregister(struct class_interface *class_intf)
mutex_lock(&parent->p->class_mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
- list_for_each_entry(dev, &parent->p->class_devices, node)
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
class_intf->remove_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
}
mutex_unlock(&parent->p->class_mutex);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d021c98605b..b98cb1416a2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -536,7 +536,6 @@ void device_initialize(struct device *dev)
klist_init(&dev->klist_children, klist_children_get,
klist_children_put);
INIT_LIST_HEAD(&dev->dma_pools);
- INIT_LIST_HEAD(&dev->node);
init_MUTEX(&dev->sem);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
@@ -916,7 +915,8 @@ int device_add(struct device *dev)
if (dev->class) {
mutex_lock(&dev->class->p->class_mutex);
/* tie the class to the device */
- list_add_tail(&dev->node, &dev->class->p->class_devices);
+ klist_add_tail(&dev->knode_class,
+ &dev->class->p->class_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf,
@@ -1032,7 +1032,7 @@ void device_del(struct device *dev)
if (class_intf->remove_dev)
class_intf->remove_dev(dev, class_intf);
/* remove the device from the class list */
- list_del_init(&dev->node);
+ klist_del(&dev->knode_class);
mutex_unlock(&dev->class->p->class_mutex);
}
device_remove_file(dev, &uevent_attr);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782b266..aa69556c348 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -109,12 +109,12 @@ static const struct attribute_group attr_group = {
static int
aoedisk_add_sysfs(struct aoedev *d)
{
- return sysfs_create_group(&d->gd->dev.kobj, &attr_group);
+ return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
- sysfs_remove_group(&d->gd->dev.kobj, &attr_group);
+ sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
static int
@@ -276,7 +276,7 @@ aoeblk_gdalloc(void *vp)
gd->first_minor = d->sysminor * AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
- gd->capacity = d->ssize;
+ set_capacity(gd, d->ssize);
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d0..961d29a53ca 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -645,7 +645,7 @@ aoecmd_sleepwork(struct work_struct *work)
unsigned long flags;
u64 ssize;
- ssize = d->gd->capacity;
+ ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);
if (bd) {
@@ -707,7 +707,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
return;
if (d->gd != NULL) {
- d->gd->capacity = ssize;
+ set_capacity(d->gd, ssize);
d->flags |= DEVFL_NEWSIZE;
} else
d->flags |= DEVFL_GDALLOC;
@@ -756,12 +756,17 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
unsigned long n_sect = bio->bi_size >> 9;
const int rw = bio_data_dir(bio);
struct hd_struct *part;
+ int cpu;
- part = get_part(disk, sector);
- all_stat_inc(disk, part, ios[rw], sector);
- all_stat_add(disk, part, ticks[rw], duration, sector);
- all_stat_add(disk, part, sectors[rw], n_sect, sector);
- all_stat_add(disk, part, io_ticks, duration, sector);
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(disk, sector);
+
+ part_stat_inc(cpu, part, ios[rw]);
+ part_stat_add(cpu, part, ticks[rw], duration);
+ part_stat_add(cpu, part, sectors[rw], n_sect);
+ part_stat_add(cpu, part, io_ticks, duration);
+
+ part_stat_unlock();
}
void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813ab0d6..6a8038d115b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -91,7 +91,7 @@ aoedev_downdev(struct aoedev *d)
}
if (d->gd)
- d->gd->capacity = 0;
+ set_capacity(d->gd, 0);
d->flags &= ~DEVFL_UP;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b73116ef923..1e1f9153000 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3460,8 +3460,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
hba[i]->cmd_pool_bits =
- kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
- 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
+ kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ * sizeof(unsigned long), GFP_KERNEL);
hba[i]->cmd_pool = (CommandList_struct *)
pci_alloc_consistent(hba[i]->pdev,
hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -3493,8 +3493,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
/* command and error info recs zeroed out before
they are used */
memset(hba[i]->cmd_pool_bits, 0,
- ((hba[i]->nr_cmds + BITS_PER_LONG -
- 1) / BITS_PER_LONG) * sizeof(unsigned long));
+ DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+ * sizeof(unsigned long));
hba[i]->num_luns = 0;
hba[i]->highest_lun = -1;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e1233aabda7..a3fd87b4144 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -365,7 +365,7 @@ struct scsi2map {
static int
cciss_scsi_add_entry(int ctlr, int hostno,
- unsigned char *scsi3addr, int devtype,
+ struct cciss_scsi_dev_t *device,
struct scsi2map *added, int *nadded)
{
/* assumes hba[ctlr]->scsi_ctlr->lock is held */
@@ -384,12 +384,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
lun = 0;
/* Is this device a non-zero lun of a multi-lun device */
/* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
- if (scsi3addr[4] != 0) {
+ if (device->scsi3addr[4] != 0) {
/* Search through our list and find the device which */
/* has the same 8 byte LUN address, excepting byte 4. */
/* Assign the same bus and target for this new LUN. */
/* Use the logical unit number from the firmware. */
- memcpy(addr1, scsi3addr, 8);
+ memcpy(addr1, device->scsi3addr, 8);
addr1[4] = 0;
for (i = 0; i < n; i++) {
sd = &ccissscsi[ctlr].dev[i];
@@ -399,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
if (memcmp(addr1, addr2, 8) == 0) {
bus = sd->bus;
target = sd->target;
- lun = scsi3addr[4];
+ lun = device->scsi3addr[4];
break;
}
}
@@ -420,8 +420,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
added[*nadded].lun = sd->lun;
(*nadded)++;
- memcpy(&sd->scsi3addr[0], scsi3addr, 8);
- sd->devtype = devtype;
+ memcpy(sd->scsi3addr, device->scsi3addr, 8);
+ memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
+ memcpy(sd->revision, device->revision, sizeof(sd->revision));
+ memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
+ sd->devtype = device->devtype;
+
ccissscsi[ctlr].ndevices++;
/* initially, (before registering with scsi layer) we don't
@@ -487,6 +491,22 @@ static void fixup_botched_add(int ctlr, char *scsi3addr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}
+static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+ struct cciss_scsi_dev_t *dev2)
+{
+ return dev1->devtype == dev2->devtype &&
+ memcmp(dev1->scsi3addr, dev2->scsi3addr,
+ sizeof(dev1->scsi3addr)) == 0 &&
+ memcmp(dev1->device_id, dev2->device_id,
+ sizeof(dev1->device_id)) == 0 &&
+ memcmp(dev1->vendor, dev2->vendor,
+ sizeof(dev1->vendor)) == 0 &&
+ memcmp(dev1->model, dev2->model,
+ sizeof(dev1->model)) == 0 &&
+ memcmp(dev1->revision, dev2->revision,
+ sizeof(dev1->revision)) == 0;
+}
+
static int
adjust_cciss_scsi_table(int ctlr, int hostno,
struct cciss_scsi_dev_t sd[], int nsds)
@@ -532,7 +552,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
for (j=0;j<nsds;j++) {
if (SCSI3ADDR_EQ(sd[j].scsi3addr,
csd->scsi3addr)) {
- if (sd[j].devtype == csd->devtype)
+ if (device_is_the_same(&sd[j], csd))
found=2;
else
found=1;
@@ -548,22 +568,26 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
cciss_scsi_remove_entry(ctlr, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
- }
- else if (found == 1) { /* device is different kind */
+ } else if (found == 1) { /* device is different in some way */
changes++;
- printk("cciss%d: device c%db%dt%dl%d type changed "
- "(device type now %s).\n",
- ctlr, hostno, csd->bus, csd->target, csd->lun,
- scsi_device_type(csd->devtype));
+ printk("cciss%d: device c%db%dt%dl%d has changed.\n",
+ ctlr, hostno, csd->bus, csd->target, csd->lun);
cciss_scsi_remove_entry(ctlr, hostno, i,
removed, &nremoved);
/* remove ^^^, hence i not incremented */
- if (cciss_scsi_add_entry(ctlr, hostno,
- &sd[j].scsi3addr[0], sd[j].devtype,
+ if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
added, &nadded) != 0)
/* we just removed one, so add can't fail. */
BUG();
csd->devtype = sd[j].devtype;
+ memcpy(csd->device_id, sd[j].device_id,
+ sizeof(csd->device_id));
+ memcpy(csd->vendor, sd[j].vendor,
+ sizeof(csd->vendor));
+ memcpy(csd->model, sd[j].model,
+ sizeof(csd->model));
+ memcpy(csd->revision, sd[j].revision,
+ sizeof(csd->revision));
} else /* device is same as it ever was, */
i++; /* so just move along. */
}
@@ -577,7 +601,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
csd = &ccissscsi[ctlr].dev[j];
if (SCSI3ADDR_EQ(sd[i].scsi3addr,
csd->scsi3addr)) {
- if (sd[i].devtype == csd->devtype)
+ if (device_is_the_same(&sd[i], csd))
found=2; /* found device */
else
found=1; /* found a bug. */
@@ -586,16 +610,14 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
}
if (!found) {
changes++;
- if (cciss_scsi_add_entry(ctlr, hostno,
-
- &sd[i].scsi3addr[0], sd[i].devtype,
+ if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
added, &nadded) != 0)
break;
} else if (found == 1) {
/* should never happen... */
changes++;
- printk("cciss%d: device unexpectedly changed type\n",
- ctlr);
+ printk(KERN_WARNING "cciss%d: device "
+ "unexpectedly changed\n", ctlr);
/* but if it does happen, we just ignore that device */
}
}
@@ -1012,7 +1034,8 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
static int
cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
- unsigned char *buf, unsigned char bufsize)
+ unsigned char page, unsigned char *buf,
+ unsigned char bufsize)
{
int rc;
CommandList_struct *cp;
@@ -1032,8 +1055,8 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
ei = cp->err_info;
cdb[0] = CISS_INQUIRY;
- cdb[1] = 0;
- cdb[2] = 0;
+ cdb[1] = (page != 0);
+ cdb[2] = page;
cdb[3] = 0;
cdb[4] = bufsize;
cdb[5] = 0;
@@ -1053,6 +1076,25 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
return rc;
}
+/* Get the device id from inquiry page 0x83 */
+static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+ unsigned char *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
+
+ if (buflen > 16)
+ buflen = 16;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
+ if (rc == 0)
+ memcpy(device_id, &buf[8], buflen);
+ kfree(buf);
+ return rc != 0;
+}
+
static int
cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
ReportLunData_struct *buf, int bufsize)
@@ -1142,25 +1184,21 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
ctlr_info_t *c;
__u32 num_luns=0;
unsigned char *ch;
- /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
- struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+ struct cciss_scsi_dev_t *currentsd, *this_device;
int ncurrent=0;
int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
int i;
c = (ctlr_info_t *) hba[cntl_num];
ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
- if (ld_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- return;
- }
inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
- if (inq_buff == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- kfree(ld_buff);
- return;
+ currentsd = kzalloc(sizeof(*currentsd) *
+ (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
+ if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
+ printk(KERN_ERR "cciss: out of memory\n");
+ goto out;
}
-
+ this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
ch = &ld_buff->LUNListLength[0];
num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
@@ -1179,23 +1217,34 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
/* adjust our table of devices */
- for(i=0; i<num_luns; i++)
- {
- int devtype;
-
+ for (i = 0; i < num_luns; i++) {
/* for each physical lun, do an inquiry */
if (ld_buff->LUN[i][3] & 0xC0) continue;
memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
- if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff,
- (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+ if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
+ (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
/* Inquiry failed (msg printed already) */
- devtype = 0; /* so we will skip this device. */
- } else /* what kind of device is this? */
- devtype = (inq_buff[0] & 0x1f);
-
- switch (devtype)
+ continue; /* so we will skip this device. */
+
+ this_device->devtype = (inq_buff[0] & 0x1f);
+ this_device->bus = -1;
+ this_device->target = -1;
+ this_device->lun = -1;
+ memcpy(this_device->scsi3addr, scsi3addr, 8);
+ memcpy(this_device->vendor, &inq_buff[8],
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
+ memcpy(this_device->revision, &inq_buff[32],
+ sizeof(this_device->revision));
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
+ this_device->device_id, sizeof(this_device->device_id));
+
+ switch (this_device->devtype)
{
case 0x05: /* CD-ROM */ {
@@ -1220,15 +1269,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
printk(KERN_INFO "cciss%d: %s ignored, "
"too many devices.\n", cntl_num,
- scsi_device_type(devtype));
+ scsi_device_type(this_device->devtype));
break;
}
- memcpy(&currentsd[ncurrent].scsi3addr[0],
- &scsi3addr[0], 8);
- currentsd[ncurrent].devtype = devtype;
- currentsd[ncurrent].bus = -1;
- currentsd[ncurrent].target = -1;
- currentsd[ncurrent].lun = -1;
+ currentsd[ncurrent] = *this_device;
ncurrent++;
break;
default:
@@ -1240,6 +1284,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
out:
kfree(inq_buff);
kfree(ld_buff);
+ kfree(currentsd);
return;
}
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index d9c2c586502..7b750245ae7 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -66,6 +66,10 @@ struct cciss_scsi_dev_t {
int devtype;
int bus, target, lun; /* as presented to the OS */
unsigned char scsi3addr[8]; /* as presented to the HW */
+ unsigned char device_id[16]; /* from inquiry pg. 0x83 */
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
+ unsigned char revision[4]; /* bytes 32-35 of inquiry data */
};
struct cciss_scsi_hba_t {
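
The inquiry rework above threads a VPD page code through cciss_scsi_do_inquiry() and adds cciss_scsi_get_device_id(), which reads device-identification page 0x83 and stashes the identifier in the new device_id field that device_is_the_same() compares. A minimal standalone sketch of the CDB and buffer layout involved, using locally named helpers (build_inquiry_cdb, copy_vpd83_id) rather than the driver's own functions:

	#include <stdint.h>
	#include <string.h>

	/* Sketch only, not driver code: build a 6-byte INQUIRY CDB the way
	 * the patched cciss_scsi_do_inquiry() now does -- the EVPD bit is
	 * set only when a non-zero VPD page is requested.  0x12 is the
	 * standard SCSI INQUIRY opcode. */
	void build_inquiry_cdb(uint8_t cdb[6], uint8_t page, uint8_t bufsize)
	{
		memset(cdb, 0, 6);
		cdb[0] = 0x12;         /* INQUIRY */
		cdb[1] = (page != 0);  /* EVPD bit: ask for a VPD page */
		cdb[2] = page;         /* 0x00 = standard data, 0x83 = device id */
		cdb[4] = bufsize;      /* allocation length */
	}

	/* Copy the identifier of the first designation descriptor from a
	 * VPD page 0x83 response; offset 8 skips the 4-byte page header
	 * and the 4-byte descriptor header, matching the offset used by
	 * cciss_scsi_get_device_id(). */
	void copy_vpd83_id(const uint8_t *vpd, uint8_t *id, int buflen)
	{
		if (buflen > 16)       /* device_id[] above is 16 bytes */
			buflen = 16;
		memcpy(id, &vpd[8], buflen);
	}

	int main(void)
	{
		uint8_t cdb[6];

		build_inquiry_cdb(cdb, 0x83, 64);   /* page 0x83, 64-byte buffer */
		return cdb[1] == 1 ? 0 : 1;         /* EVPD bit must be set */
	}
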
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 09c14341e6e..3d967525e9a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -424,7 +424,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
&(hba[i]->cmd_pool_dhandle));
hba[i]->cmd_pool_bits = kcalloc(
- (NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long),
+ DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
GFP_KERNEL);
if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
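
The cmd_pool_bits allocation above (and the fdc_specify()/rw_interrupt() changes in floppy.c that follow) merely replace an open-coded ceiling division with the kernel's DIV_ROUND_UP() macro; the computed values are unchanged. A tiny self-contained check with illustrative numbers:

	#include <assert.h>

	/* Same definition as include/linux/kernel.h */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* e.g. 100 command slots, one bit each, packed into 64-bit longs */
		int nr_cmds = 100, bits_per_long = 64;

		/* the old open-coded form and the macro agree: 2 longs */
		assert((nr_cmds + bits_per_long - 1) / bits_per_long ==
		       DIV_ROUND_UP(nr_cmds, bits_per_long));
		return 0;
	}
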
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 395f8ea7981..cf64ddf5d83 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -423,8 +423,15 @@ static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
* 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
* side 0 is on physical side 0 (but with the misnamed sector IDs).
* 'stretch' should probably be renamed to something more general, like
- * 'options'. Other parameters should be self-explanatory (see also
- * setfdprm(8)).
+ * 'options'.
+ *
+ * Bits 2 through 9 of 'stretch' tell the number of the first sector.
+ * The LSB (bit 2) is flipped. For most disks, the first sector
+ * is 1 (represented by 0x00<<2). For some CP/M and music sampler
+ * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
+ * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
+ *
+ * Other parameters should be self-explanatory (see also setfdprm(8)).
*/
/*
Size
@@ -1355,20 +1362,20 @@ static void fdc_specify(void)
}
/* Convert step rate from microseconds to milliseconds and 4 bits */
- srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+ srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
if (slow_floppy) {
srt = srt / 4;
}
SUPBOUND(srt, 0xf);
INFBOUND(srt, 0);
- hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+ hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
if (hlt < 0x01)
hlt = 0x01;
else if (hlt > 0x7f)
hlt = hlt_max_code;
- hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+ hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
if (hut < 0x1)
hut = 0x1;
else if (hut > 0xf)
@@ -2236,9 +2243,9 @@ static void setup_format_params(int track)
}
}
}
- if (_floppy->stretch & FD_ZEROBASED) {
+ if (_floppy->stretch & FD_SECTBASEMASK) {
for (count = 0; count < F_SECT_PER_TRACK; count++)
- here[count].sect--;
+ here[count].sect += FD_SECTBASE(_floppy) - 1;
}
}
@@ -2385,7 +2392,7 @@ static void rw_interrupt(void)
#ifdef FLOPPY_SANITY_CHECK
if (nr_sectors / ssize >
- (in_sector_offset + current_count_sectors + ssize - 1) / ssize) {
+ DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
@@ -2649,7 +2656,7 @@ static int make_raw_rw_request(void)
}
HEAD = fsector_t / _floppy->sect;
- if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) ||
+ if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
max_sector = _floppy->sect;
@@ -2679,7 +2686,7 @@ static int make_raw_rw_request(void)
CODE2SIZE;
SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
- ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1);
+ FD_SECTBASE(_floppy);
/* tracksize describes the size which can be filled up with sectors
* of size ssize.
@@ -3311,7 +3318,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
g->head <= 0 ||
g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
/* check if reserved bits are set */
- (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0)
+ (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
return -EINVAL;
if (type) {
if (!capable(CAP_SYS_ADMIN))
@@ -3356,7 +3363,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (DRS->maxblock > user_params[drive].sect ||
DRS->maxtrack ||
((user_params[drive].sect ^ oldStretch) &
- (FD_SWAPSIDES | FD_ZEROBASED)))
+ (FD_SWAPSIDES | FD_SECTBASEMASK)))
invalidate_drive(bdev);
else
process_fd_request();
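
The new floppy.c comment describes how the first sector number is packed into 'stretch': bits 2 through 9 hold it with the least significant bit flipped, which is what FD_SECTBASEMASK and FD_SECTBASE() (defined in include/linux/fd.h, not shown in this hunk) extract. A standalone sketch of that encoding under local macro names, so it does not claim to reproduce the exact fd.h definitions:

	#include <assert.h>

	/* Sketch of the encoding described in the comment above; the
	 * kernel's own macros live in include/linux/fd.h. */
	#define SECTBASE_MASK            0x3FC                /* bits 2..9 of 'stretch' */
	#define ENCODE_SECTBASE(s)       (((s) ^ 1) << 2)     /* flip LSB, shift into place */
	#define DECODE_SECTBASE(stretch) ((((stretch) & SECTBASE_MASK) >> 2) ^ 1)

	int main(void)
	{
		assert(ENCODE_SECTBASE(1)    == (0x00 << 2));   /* ordinary disks     */
		assert(ENCODE_SECTBASE(0)    == (0x01 << 2));   /* Ensoniq EPS 16plus */
		assert(ENCODE_SECTBASE(0xC1) == (0xC0 << 2));   /* Amstrad CPC        */

		assert(DECODE_SECTBASE(ENCODE_SECTBASE(0xC1)) == 0xC1);
		return 0;
	}
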
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1778e4a2c67..7b3351260d5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -403,7 +403,7 @@ static int nbd_do_it(struct nbd_device *lo)
BUG_ON(lo->magic != LO_MAGIC);
lo->pid = current->pid;
- ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
+ ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
if (ret) {
printk(KERN_ERR "nbd: sysfs_create_file failed!");
return ret;
@@ -412,7 +412,7 @@ static int nbd_do_it(struct nbd_device *lo)
while ((req = nbd_read_stat(lo)) != NULL)
nbd_end_request(req);
- sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
+ sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 29b7a648cc6..0e077150568 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2544,7 +2544,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
if (last_zone != zone) {
BUG_ON(last_zone != zone + pd->settings.size);
first_sectors = last_zone - bio->bi_sector;
- bp = bio_split(bio, bio_split_pool, first_sectors);
+ bp = bio_split(bio, first_sectors);
BUG_ON(!bp);
pkt_make_request(q, &bp->bio1);
pkt_make_request(q, &bp->bio2);
@@ -2911,7 +2911,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
if (!disk->queue)
goto out_mem2;
- pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
+ pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
goto out_new_dev;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d797e209951..936466f62af 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -199,7 +199,8 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
- } else if (req->cmd_type == REQ_TYPE_FLUSH) {
+ } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+ req->cmd[0] == REQ_LB_OP_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else {
@@ -257,7 +258,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
- if (req->cmd_type == REQ_TYPE_FLUSH) {
+ if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+ req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
num_sectors = req->hard_cur_sectors;
op = "flush";
@@ -405,7 +407,8 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- req->cmd_type = REQ_TYPE_FLUSH;
+ req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ req->cmd[0] = REQ_LB_OP_FLUSH;
}
static unsigned long ps3disk_mask;
@@ -538,7 +541,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
struct ps3disk_private *priv = dev->sbd.core.driver_data;
mutex_lock(&ps3disk_mask_mutex);
- __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
+ __clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
&ps3disk_mask);
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42251095134..6ec5fc05278 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -47,20 +47,20 @@ static void blk_done(struct virtqueue *vq)
spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
- int uptodate;
+ int error;
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
- uptodate = 1;
+ error = 0;
break;
case VIRTIO_BLK_S_UNSUPP:
- uptodate = -ENOTTY;
+ error = -ENOTTY;
break;
default:
- uptodate = 0;
+ error = -EIO;
break;
}
- end_dequeued_request(vbr->req, uptodate);
+ __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@@ -84,11 +84,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = vbr->req->sector;
- vbr->out_hdr.ioprio = vbr->req->ioprio;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
vbr->out_hdr.sector = 0;
- vbr->out_hdr.ioprio = vbr->req->ioprio;
+ vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else {
/* We don't put anything else in the queue. */
BUG();
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3ca643cafcc..bff602ccccf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -105,15 +105,17 @@ static DEFINE_SPINLOCK(blkif_io_lock);
#define GRANT_INVALID_REF 0
#define PARTS_PER_DISK 16
+#define PARTS_PER_EXT_DISK 256
#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)
-#define DEV_NAME "xvd" /* name in /dev */
+#define EXT_SHIFT 28
+#define EXTENDED (1<<EXT_SHIFT)
+#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
+#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
-/* Information about our VBDs. */
-#define MAX_VBDS 64
-static LIST_HEAD(vbds_list);
+#define DEV_NAME "xvd" /* name in /dev */
static int get_id_from_freelist(struct blkfront_info *info)
{
@@ -386,31 +388,60 @@ static int xlvbd_barrier(struct blkfront_info *info)
}
-static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
- int vdevice, u16 vdisk_info, u16 sector_size,
- struct blkfront_info *info)
+static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
+ struct blkfront_info *info,
+ u16 vdisk_info, u16 sector_size)
{
struct gendisk *gd;
int nr_minors = 1;
int err = -ENODEV;
+ unsigned int offset;
+ int minor;
+ int nr_parts;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
- if ((minor % PARTS_PER_DISK) == 0)
- nr_minors = PARTS_PER_DISK;
+ if ((info->vdevice>>EXT_SHIFT) > 1) {
+ /* this is above the extended range; something is wrong */
+ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
+ return -ENODEV;
+ }
+
+ if (!VDEV_IS_EXTENDED(info->vdevice)) {
+ minor = BLKIF_MINOR(info->vdevice);
+ nr_parts = PARTS_PER_DISK;
+ } else {
+ minor = BLKIF_MINOR_EXT(info->vdevice);
+ nr_parts = PARTS_PER_EXT_DISK;
+ }
+
+ if ((minor % nr_parts) == 0)
+ nr_minors = nr_parts;
gd = alloc_disk(nr_minors);
if (gd == NULL)
goto out;
- if (nr_minors > 1)
- sprintf(gd->disk_name, "%s%c", DEV_NAME,
- 'a' + minor / PARTS_PER_DISK);
- else
- sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
- 'a' + minor / PARTS_PER_DISK,
- minor % PARTS_PER_DISK);
+ offset = minor / nr_parts;
+
+ if (nr_minors > 1) {
+ if (offset < 26)
+ sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
+ else
+ sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
+ 'a' + ((offset / 26)-1), 'a' + (offset % 26));
+ } else {
+ if (offset < 26)
+ sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+ 'a' + offset,
+ minor & (nr_parts - 1));
+ else
+ sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
+ 'a' + ((offset / 26) - 1),
+ 'a' + (offset % 26),
+ minor & (nr_parts - 1));
+ }
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
@@ -699,8 +730,13 @@ static int blkfront_probe(struct xenbus_device *dev,
err = xenbus_scanf(XBT_NIL, dev->nodename,
"virtual-device", "%i", &vdevice);
if (err != 1) {
- xenbus_dev_fatal(dev, err, "reading virtual-device");
- return err;
+ /* go looking in the extended area instead */
+ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
+ "%i", &vdevice);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading virtual-device");
+ return err;
+ }
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -861,9 +897,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err)
info->feature_barrier = 0;
- err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
- sectors, info->vdevice,
- binfo, sector_size, info);
+ err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
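
xlvbd_alloc_gendisk() above now decodes both the classic and the extended virtual-device encodings and derives the /dev name from minor / nr_parts: one trailing letter for the first 26 disks, two letters afterwards. A hedged sketch of the whole-disk naming rule (partition suffixes omitted, vdevice values purely illustrative; xvd_name() is a local helper, not a driver function):

	#include <stdio.h>

	#define EXT_SHIFT          28
	#define EXTENDED           (1 << EXT_SHIFT)
	#define PARTS_PER_DISK     16
	#define PARTS_PER_EXT_DISK 256

	/* Whole-disk name for a xen vdevice, mirroring the nr_minors > 1
	 * branch above: "xvda".."xvdz", then "xvdaa" and so on. */
	static void xvd_name(unsigned int vdevice, char *buf)
	{
		int minor, nr_parts, offset;

		if (vdevice & EXTENDED) {
			minor = vdevice & ~EXTENDED;
			nr_parts = PARTS_PER_EXT_DISK;
		} else {
			minor = vdevice & 0xff;        /* BLKIF_MINOR() */
			nr_parts = PARTS_PER_DISK;
		}
		offset = minor / nr_parts;

		if (offset < 26)
			sprintf(buf, "xvd%c", 'a' + offset);
		else
			sprintf(buf, "xvd%c%c", 'a' + (offset / 26) - 1,
				'a' + (offset % 26));
	}

	int main(void)
	{
		char name[8];

		xvd_name(16, name);                     /* classic, minor 16 */
		printf("%s\n", name);                   /* -> xvdb */
		xvd_name(EXTENDED | (26 * 256), name);  /* extended, disk 26 */
		printf("%s\n", name);                   /* -> xvdaa */
		return 0;
	}
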
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1e55a658e6c..32f3a8ed8d3 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -256,7 +256,6 @@ static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
- kfree(buf);
}
usb_free_urb(urb);
@@ -298,7 +297,6 @@ static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
- kfree(buf);
}
usb_free_urb(urb);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 6a010681ecf..af472e05273 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,14 +102,19 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
/* Broadcom BCM2046 */
+ { USB_DEVICE(0x0a5c, 0x2146), .driver_info = BTUSB_RESET },
{ USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET },
+ /* Apple MacBook Pro with Broadcom chip */
+ { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET },
+
/* IBM/Lenovo ThinkPad with Broadcom chip */
{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
/* Targus ACB10US */
{ USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET },
+ { USB_DEVICE(0x0a5c, 0x2154), .driver_info = BTUSB_RESET },
/* ANYCOM Bluetooth USB-200 and USB-250 */
{ USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET },
@@ -147,6 +152,9 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
+ /* Belkin F8T016 device */
+ { USB_DEVICE(0x050d, 0x016a), .driver_info = BTUSB_RESET },
+
/* Digianswer devices */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
{ USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@@ -169,6 +177,7 @@ static struct usb_device_id blacklist_table[] = {
struct btusb_data {
struct hci_dev *hdev;
struct usb_device *udev;
+ struct usb_interface *intf;
struct usb_interface *isoc;
spinlock_t lock;
@@ -267,7 +276,6 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev)
BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
- kfree(buf);
}
usb_free_urb(urb);
@@ -350,7 +358,6 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev)
BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
- kfree(buf);
}
usb_free_urb(urb);
@@ -471,7 +478,6 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev)
BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
- kfree(buf);
}
usb_free_urb(urb);
@@ -516,7 +522,7 @@ static int btusb_open(struct hci_dev *hdev)
err = btusb_submit_intr_urb(hdev);
if (err < 0) {
- clear_bit(BTUSB_INTR_RUNNING, &hdev->flags);
+ clear_bit(BTUSB_INTR_RUNNING, &data->flags);
clear_bit(HCI_RUNNING, &hdev->flags);
}
@@ -532,8 +538,10 @@ static int btusb_close(struct hci_dev *hdev)
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
+ cancel_work_sync(&data->work);
+
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
- usb_kill_anchored_urbs(&data->intr_anchor);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
clear_bit(BTUSB_BULK_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->bulk_anchor);
@@ -821,6 +829,7 @@ static int btusb_probe(struct usb_interface *intf,
}
data->udev = interface_to_usbdev(intf);
+ data->intf = intf;
spin_lock_init(&data->lock);
@@ -889,7 +898,7 @@ static int btusb_probe(struct usb_interface *intf,
if (data->isoc) {
err = usb_driver_claim_interface(&btusb_driver,
- data->isoc, NULL);
+ data->isoc, data);
if (err < 0) {
hci_free_dev(hdev);
kfree(data);
@@ -921,13 +930,22 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
- if (data->isoc)
- usb_driver_release_interface(&btusb_driver, data->isoc);
+ __hci_dev_hold(hdev);
- usb_set_intfdata(intf, NULL);
+ usb_set_intfdata(data->intf, NULL);
+
+ if (data->isoc)
+ usb_set_intfdata(data->isoc, NULL);
hci_unregister_dev(hdev);
+ if (intf == data->isoc)
+ usb_driver_release_interface(&btusb_driver, data->intf);
+ else if (data->isoc)
+ usb_driver_release_interface(&btusb_driver, data->isoc);
+
+ __hci_dev_put(hdev);
+
hci_free_dev(hdev);
}
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 74031de517e..d47f2f80acc 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW;
- ret = blk_rq_map_user(q, rq, ubuf, len);
+ ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
break;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1231d95aa69..d6ba77a2dd7 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -624,14 +624,14 @@ static void gdrom_readdisk_dma(struct work_struct *work)
ctrl_outb(1, GDROM_DMA_STATUS_REG);
wait_event_interruptible_timeout(request_queue,
gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
- err = gd.transfer;
+ err = gd.transfer ? -EIO : 0;
gd.transfer = 0;
gd.pending = 0;
/* now seek to take the request spinlock
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
- end_dequeued_request(req, 1 - err);
+ __blk_end_request(req, err, blk_rq_bytes(req));
}
spin_unlock(&gdrom_lock);
kfree(read_command);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7ce1ac4baa6..6af435b8986 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -661,10 +661,10 @@ void add_disk_randomness(struct gendisk *disk)
if (!disk || !disk->random)
return;
/* first major is 1, so we get >= 0x200 here */
- DEBUG_ENT("disk event %d:%d\n", disk->major, disk->first_minor);
+ DEBUG_ENT("disk event %d:%d\n",
+ MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
- add_timer_randomness(disk->random,
- 0x100 + MKDEV(disk->major, disk->first_minor));
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
#endif
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 3738cfa209f..f5fc64f89c5 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -6,6 +6,7 @@ menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
depends on EXPERIMENTAL
+ select SECURITYFS
---help---
If you have a TPM security chip in your system, which
implements the Trusted Computing Group's specification,
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index daeb8f76697..e4dce870954 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -695,13 +695,23 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
{
struct tty_driver *p, *res = NULL;
int tty_line = 0;
+ int len;
char *str;
+ for (str = name; *str; str++)
+ if ((*str >= '0' && *str <= '9') || *str == ',')
+ break;
+ if (!*str)
+ return NULL;
+
+ len = str - name;
+ tty_line = simple_strtoul(str, &str, 10);
+
mutex_lock(&tty_mutex);
/* Search through the tty devices to look for a match */
list_for_each_entry(p, &tty_drivers, tty_drivers) {
- str = name + strlen(p->name);
- tty_line = simple_strtoul(str, &str, 10);
+ if (strncmp(name, p->name, len) != 0)
+ continue;
if (*str == ',')
str++;
if (*str == '\0')
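
The rewritten tty_find_polling_driver() above first splits the user-supplied string itself: everything before the first digit or comma is the driver-name prefix compared with strncmp(), the digits give the line number, and anything after a comma is left over as options. A standalone sketch of just that parsing step (split_tty_name() is an illustration, not a kernel function; the real lookup then walks the registered driver list):

	#include <stdio.h>
	#include <stdlib.h>

	/* Split e.g. "ttyS0" or "ttyS0,115200" into a driver-name prefix
	 * length, a line number and an optional options string. */
	static int split_tty_name(const char *name, size_t *prefix_len,
				  int *line, const char **options)
	{
		const char *str;
		char *end;

		/* find the first digit or comma; everything before it is the
		 * prefix that gets compared against registered driver names */
		for (str = name; *str; str++)
			if ((*str >= '0' && *str <= '9') || *str == ',')
				break;
		if (!*str)
			return -1;                    /* no line number at all */

		*prefix_len = str - name;
		*line = (int)strtoul(str, &end, 10);  /* line number, if any digits */
		if (*end == ',')
			end++;                        /* options follow the comma */
		*options = *end ? end : NULL;
		return 0;
	}

	int main(void)
	{
		const char *name = "ttyS0,115200";
		const char *opts;
		size_t len;
		int line;

		if (split_tty_name(name, &len, &line, &opts) == 0)
			printf("driver \"%.*s\", line %d, options %s\n",
			       (int)len, name, line, opts ? opts : "(none)");
		/* prints: driver "ttyS", line 0, options 115200 */
		return 0;
	}
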
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 4eee533f3f4..71d2ac4e3f4 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -178,11 +178,13 @@ static int verify_pmtmr_rate(void)
/* Number of monotonicity checks to perform during initialization */
#define ACPI_PM_MONOTONICITY_CHECKS 10
+/* Number of reads we try in order to get two different values */
+#define ACPI_PM_READ_CHECKS 10000
static int __init init_acpi_pm_clocksource(void)
{
cycle_t value1, value2;
- unsigned int i, j, good = 0;
+ unsigned int i, j = 0;
if (!pmtmr_ioport)
return -ENODEV;
@@ -192,29 +194,26 @@ static int __init init_acpi_pm_clocksource(void)
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
+ udelay(100 * j);
value1 = clocksource_acpi_pm.read();
- for (i = 0; i < 10000; i++) {
+ for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
value2 = clocksource_acpi_pm.read();
if (value2 == value1)
continue;
if (value2 > value1)
- good++;
break;
if ((value2 < value1) && ((value2) < 0xFFF))
- good++;
break;
printk(KERN_INFO "PM-Timer had inconsistent results:"
" 0x%#llx, 0x%#llx - aborting.\n",
value1, value2);
return -EINVAL;
}
- udelay(300 * i);
- }
-
- if (good != ACPI_PM_MONOTONICITY_CHECKS) {
- printk(KERN_INFO "PM-Timer failed consistency check "
- " (0x%#llx) - aborting.\n", value1);
- return -ENODEV;
+ if (i == ACPI_PM_READ_CHECKS) {
+ printk(KERN_INFO "PM-Timer failed consistency check "
+ " (0x%#llx) - aborting.\n", value1);
+ return -ENODEV;
+ }
}
if (verify_pmtmr_rate() != 0)
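
The reworked verification loop above takes up to ACPI_PM_READ_CHECKS reads per monotonicity pass and accepts a pair of reads if the counter moved forward, or if it apparently wrapped (the second value is below 0xFFF after the 24-bit counter rolled over); anything else aborts registration. A small sketch of that pair-wise test, assuming the 24-bit width the driver masks with elsewhere:

	#include <stdio.h>
	#include <stdint.h>

	#define PM_TIMER_MASK 0xFFFFFFUL   /* the ACPI PM timer is a 24-bit counter */

	/* Returns 1 if the second read is consistent with a monotonic
	 * 24-bit counter: it either moved forward, or it wrapped past
	 * 0xFFFFFF and is therefore still a small value. */
	static int pm_reads_consistent(uint32_t value1, uint32_t value2)
	{
		if (value2 > value1)
			return 1;
		if (value2 < value1 && value2 < 0xFFF)
			return 1;              /* wrap-around */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", pm_reads_consistent(0xFFFFF0 & PM_TIMER_MASK, 0x10));   /* 1: wrapped */
		printf("%d\n", pm_reads_consistent(0x800000, 0x400000));               /* 0: went backwards */
		return 0;
	}
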
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8a67f16987d..31d6f535a79 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1467,25 +1467,27 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
- int ret;
+ int ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
- return -EINVAL;
+ goto no_policy;
if (unlikely(lock_policy_rwsem_write(policy->cpu)))
- return -EINVAL;
+ goto fail;
ret = __cpufreq_driver_target(policy, target_freq, relation);
unlock_policy_rwsem_write(policy->cpu);
+fail:
cpufreq_cpu_put(policy);
+no_policy:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
@@ -1493,8 +1495,8 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
if (!policy)
return -EINVAL;
- if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy->cpu);
+ if (cpu_online(cpu) && cpufreq_driver->getavg)
+ ret = cpufreq_driver->getavg(policy, cpu);
cpufreq_cpu_put(policy);
return ret;
@@ -1717,13 +1719,17 @@ int cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
struct cpufreq_policy policy;
- int ret = 0;
+ int ret;
- if (!data)
- return -ENODEV;
+ if (!data) {
+ ret = -ENODEV;
+ goto no_policy;
+ }
- if (unlikely(lock_policy_rwsem_write(cpu)))
- return -EINVAL;
+ if (unlikely(lock_policy_rwsem_write(cpu))) {
+ ret = -EINVAL;
+ goto fail;
+ }
dprintk("updating policy for CPU %u\n", cpu);
memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1750,7 +1756,9 @@ int cpufreq_update_policy(unsigned int cpu)
unlock_policy_rwsem_write(cpu);
+fail:
cpufreq_cpu_put(data);
+no_policy:
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index ac0bbf2d234..e2657837d95 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -460,6 +460,7 @@ static void do_dbs_timer(struct work_struct *work)
static inline void dbs_timer_init(void)
{
+ init_timer_deferrable(&dbs_work.timer);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
@@ -575,13 +576,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
struct cpufreq_governor cpufreq_gov_conservative = {
.name = "conservative",
.governor = cpufreq_governor_dbs,
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_conservative);
static int __init cpufreq_gov_dbs_init(void)
{
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 33855cb3cf1..2ab3c12b88a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,13 +18,19 @@
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
/*
* dbs is used in this file as a shortform for demandbased switching
* It helps to keep variable names smaller, simpler
*/
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
+#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
@@ -57,6 +63,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_nice;
struct cpufreq_policy *cur_policy;
struct delayed_work work;
struct cpufreq_frequency_table *freq_table;
@@ -86,21 +93,24 @@ static struct workqueue_struct *kondemand_wq;
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
+ unsigned int down_differential;
unsigned int ignore_nice;
unsigned int powersave_bias;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
};
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+ cputime64_t *wall)
{
cputime64_t idle_time;
- cputime64_t cur_jiffies;
+ cputime64_t cur_wall_time;
cputime64_t busy_time;
- cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
kstat_cpu(cpu).cpustat.system);
@@ -113,7 +123,37 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
kstat_cpu(cpu).cpustat.nice);
}
- idle_time = cputime64_sub(cur_jiffies, busy_time);
+ idle_time = cputime64_sub(cur_wall_time, busy_time);
+ if (wall)
+ *wall = cur_wall_time;
+
+ return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+
+ if (dbs_tuners_ins.ignore_nice) {
+ cputime64_t cur_nice;
+ unsigned long cur_nice_jiffies;
+ struct cpu_dbs_info_s *dbs_info;
+
+ dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
+ dbs_info->prev_cpu_nice);
+ /*
+ * Assumption: nice time between sampling periods will be
+			 * less than 2^32 jiffies on 32-bit systems
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
+ dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
+ return idle_time + jiffies_to_usecs(cur_nice_jiffies);
+ }
return idle_time;
}
@@ -277,8 +317,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
for_each_online_cpu(j) {
struct cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
- dbs_info->prev_cpu_wall = get_jiffies_64();
+ dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->prev_cpu_wall);
}
mutex_unlock(&dbs_mutex);
@@ -334,9 +374,7 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
- unsigned int idle_ticks, total_ticks;
- unsigned int load = 0;
- cputime64_t cur_jiffies;
+ unsigned int max_load_freq;
struct cpufreq_policy *policy;
unsigned int j;
@@ -346,13 +384,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
this_dbs_info->freq_lo = 0;
policy = this_dbs_info->cur_policy;
- cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
- this_dbs_info->prev_cpu_wall);
- this_dbs_info->prev_cpu_wall = get_jiffies_64();
- if (!total_ticks)
- return;
/*
* Every sampling_rate, we check, if current idle time is less
* than 20% (default), then we try to increase frequency
@@ -365,27 +397,44 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* 5% (default) of current frequency
*/
- /* Get Idle Time */
- idle_ticks = UINT_MAX;
+ /* Get Absolute Load - in terms of freq */
+ max_load_freq = 0;
+
for_each_cpu_mask_nr(j, policy->cpus) {
- cputime64_t total_idle_ticks;
- unsigned int tmp_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
+ cputime64_t cur_wall_time, cur_idle_time;
+ unsigned int idle_time, wall_time;
+ unsigned int load, load_freq;
+ int freq_avg;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- total_idle_ticks = get_cpu_idle_time(j);
- tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ j_dbs_info->prev_cpu_wall);
+ j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = total_idle_ticks;
+ j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ load = 100 * (wall_time - idle_time) / wall_time;
+
+ freq_avg = __cpufreq_driver_getavg(policy, j);
+ if (freq_avg <= 0)
+ freq_avg = policy->cur;
- if (tmp_idle_ticks < idle_ticks)
- idle_ticks = tmp_idle_ticks;
+ load_freq = load * freq_avg;
+ if (load_freq > max_load_freq)
+ max_load_freq = load_freq;
}
- if (likely(total_ticks > idle_ticks))
- load = (100 * (total_ticks - idle_ticks)) / total_ticks;
/* Check for frequency increase */
- if (load > dbs_tuners_ins.up_threshold) {
+ if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
/* if we are already at full speed then break out early */
if (!dbs_tuners_ins.powersave_bias) {
if (policy->cur == policy->max)
@@ -412,15 +461,13 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
* can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold.
*/
- if (load < (dbs_tuners_ins.up_threshold - 10)) {
- unsigned int freq_next, freq_cur;
-
- freq_cur = __cpufreq_driver_getavg(policy);
- if (!freq_cur)
- freq_cur = policy->cur;
-
- freq_next = (freq_cur * load) /
- (dbs_tuners_ins.up_threshold - 10);
+ if (max_load_freq <
+ (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+ policy->cur) {
+ unsigned int freq_next;
+ freq_next = max_load_freq /
+ (dbs_tuners_ins.up_threshold -
+ dbs_tuners_ins.down_differential);
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
@@ -526,8 +573,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
- j_dbs_info->prev_cpu_wall = get_jiffies_64();
+ j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_dbs_info->prev_cpu_wall);
}
this_dbs_info->cpu = cpu;
/*
@@ -579,22 +626,42 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_ondemand);
static int __init cpufreq_gov_dbs_init(void)
{
+ int err;
+ cputime64_t wall;
+ u64 idle_time;
+ int cpu = get_cpu();
+
+ idle_time = get_cpu_idle_time_us(cpu, &wall);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ /* Idle micro accounting is supported. Use finer thresholds */
+ dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ dbs_tuners_ins.down_differential =
+ MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ }
+
kondemand_wq = create_workqueue("kondemand");
if (!kondemand_wq) {
printk(KERN_ERR "Creation of kondemand failed\n");
return -EFAULT;
}
- return cpufreq_register_governor(&cpufreq_gov_ondemand);
+ err = cpufreq_register_governor(&cpufreq_gov_ondemand);
+ if (err)
+ destroy_workqueue(kondemand_wq);
+
+ return err;
}
static void __exit cpufreq_gov_dbs_exit(void)
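
dbs_check_cpu() above now works in "load times average frequency" units: for each CPU in the policy it computes load from the wall/idle-time deltas, scales it by __cpufreq_driver_getavg(), keeps the maximum, and compares that against up_threshold (and up_threshold - down_differential) times the current frequency; the scale-down target is max_load_freq divided by the lower threshold. A worked example with made-up numbers, using the default thresholds defined above:

	#include <stdio.h>

	#define UP_THRESHOLD       80   /* DEF_FREQUENCY_UP_THRESHOLD */
	#define DOWN_DIFFERENTIAL  10   /* DEF_FREQUENCY_DOWN_DIFFERENTIAL */

	int main(void)
	{
		/* figures chosen for illustration: a 100 ms window on a 2 GHz CPU */
		unsigned int wall_time = 100000, idle_time = 40000;  /* microseconds */
		unsigned int cur = 2000000, freq_avg = 2000000;      /* kHz */

		unsigned int load = 100 * (wall_time - idle_time) / wall_time;  /* 60 */
		unsigned int load_freq = load * freq_avg;

		if (load_freq > UP_THRESHOLD * cur)
			printf("scale up to max\n");
		else if (load_freq < (UP_THRESHOLD - DOWN_DIFFERENTIAL) * cur)
			/* lowest frequency that keeps load under the threshold */
			printf("scale down to %u kHz\n",
			       load_freq / (UP_THRESHOLD - DOWN_DIFFERENTIAL));
		else
			printf("stay at %u kHz\n", cur);
		return 0;
	}

	/* With these numbers: load 60%, load_freq 120000000, thresholds
	 * 160000000 and 140000000, so it scales down to 1714285 kHz. */
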
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index e8e1451ef1c..7e2e515087f 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -36,12 +36,14 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
return 0;
}
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+static
+#endif
struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.governor = cpufreq_governor_performance,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_performance);
static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 88d2f44fba4..e6db5faf3eb 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -35,12 +35,14 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
return 0;
}
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+static
+#endif
struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.governor = cpufreq_governor_powersave,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_powersave);
static int __init cpufreq_gov_powersave_init(void)
{
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 32244aa7cc0..1442bbada05 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -187,6 +187,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
}
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+static
+#endif
struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
@@ -194,7 +197,6 @@ struct cpufreq_governor cpufreq_gov_userspace = {
.show_setspeed = show_speed,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_userspace);
static int __init cpufreq_gov_userspace_init(void)
{
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index ee827a7f7c6..b6ad3ac5916 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1157,6 +1157,8 @@ static int aead_authenc_givencrypt(
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
+ /* avoid consecutive packets going out with same IV */
+ *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
return ipsec_esp(edesc, areq, req->giv, req->seq,
ipsec_esp_encrypt_done);
@@ -1449,6 +1451,8 @@ static int talitos_probe(struct of_device *ofdev,
priv->ofdev = ofdev;
+ INIT_LIST_HEAD(&priv->alg_list);
+
tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
tasklet_init(&priv->error_task, talitos_error, (unsigned long)dev);
@@ -1575,8 +1579,6 @@ static int talitos_probe(struct of_device *ofdev,
}
/* register crypto algorithms the device supports */
- INIT_LIST_HEAD(&priv->alg_list);
-
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 94df9177124..0778d99aea7 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data)
int i;
status_block = dma_readl(dw, RAW.BLOCK);
- status_xfer = dma_readl(dw, RAW.BLOCK);
+ status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d568c65c137..d9e7a49d6cb 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -279,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "OTES1 Fan", 36, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
- { 0x0011, NULL /* Abit AT8 32X, need DMI string */, {
+ { 0x0011, "AT8 32X(ATI RD580-ULI M1575)", {
{ "CPU Core", 0, 0, 10, 1, 0 },
{ "DDR", 1, 0, 20, 1, 0 },
{ "DDR VTT", 2, 0, 10, 1, 0 },
@@ -303,6 +303,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
{ "SYS Fan", 34, 2, 60, 1, 0 },
{ "AUX1 Fan", 35, 2, 60, 1, 0 },
{ "AUX2 Fan", 36, 2, 60, 1, 0 },
+ { "AUX3 Fan", 37, 2, 60, 1, 0 },
{ NULL, 0, 0, 0, 0, 0 } }
},
{ 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index ce8d94fbfd7..bfda8c80ef2 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -69,7 +69,7 @@ static inline int ad7414_write(struct i2c_client *client, u8 reg, u8 value)
return i2c_smbus_write_byte_data(client, reg, value);
}
-struct ad7414_data *ad7414_update_device(struct device *dev)
+static struct ad7414_data *ad7414_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ad7414_data *data = i2c_get_clientdata(client);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d191118ba0c..d6b490d3e36 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -31,7 +31,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("System voltages control via Attansic ATXP1");
-MODULE_VERSION("0.6.2");
+MODULE_VERSION("0.6.3");
MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
#define ATXP1_VID 0x00
@@ -289,16 +289,16 @@ static int atxp1_detect(struct i2c_client *new_client, int kind,
if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0x3f) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0xfe) == 0) &&
- (i2c_smbus_read_byte_data(new_client, 0xff) == 0) )) {
+ (i2c_smbus_read_byte_data(new_client, 0xff) == 0)))
+ return -ENODEV;
- /* No vendor ID, now checking if registers 0x10,0x11 (non-existent)
- * showing the same as register 0x00 */
- temp = i2c_smbus_read_byte_data(new_client, 0x00);
+ /* No vendor ID, now checking if registers 0x10,0x11 (non-existent)
+ * showing the same as register 0x00 */
+ temp = i2c_smbus_read_byte_data(new_client, 0x00);
- if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
- (i2c_smbus_read_byte_data(new_client, 0x11) == temp) ))
- return -ENODEV;
- }
+ if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
+ (i2c_smbus_read_byte_data(new_client, 0x11) == temp)))
+ return -ENODEV;
/* Get VRM */
temp = vid_which_vrm();
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 30cdb095677..d793cc01199 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -46,6 +46,8 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#include <linux/string.h>
+#include <linux/dmi.h>
#include <asm/io.h>
#define DRVNAME "it87"
@@ -236,6 +238,8 @@ struct it87_sio_data {
/* Values read from Super-I/O config space */
u8 revision;
u8 vid_value;
+ /* Values set based on DMI strings */
+ u8 skip_pwm;
};
/* For each registered chip, we need to keep some data in memory.
@@ -273,10 +277,10 @@ struct it87_data {
static inline int has_16bit_fans(const struct it87_data *data)
{
/* IT8705F Datasheet 0.4.1, 3h == Version G.
- IT8712F Datasheet 0.9.1, section 8.3.5 indicates 7h == Version I.
+ IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
These are the first revisions with 16bit tachometer support. */
return (data->type == it87 && data->revision >= 0x03)
- || (data->type == it8712 && data->revision >= 0x07)
+ || (data->type == it8712 && data->revision >= 0x08)
|| data->type == it8716
|| data->type == it8718;
}
@@ -964,6 +968,7 @@ static int __init it87_find(unsigned short *address,
{
int err = -ENODEV;
u16 chip_type;
+ const char *board_vendor, *board_name;
superio_enter();
chip_type = force_id ? force_id : superio_inw(DEVID);
@@ -1022,6 +1027,24 @@ static int __init it87_find(unsigned short *address,
pr_info("it87: in7 is VCCH (+5V Stand-By)\n");
}
+ /* Disable specific features based on DMI strings */
+ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_vendor && board_name) {
+ if (strcmp(board_vendor, "nVIDIA") == 0
+ && strcmp(board_name, "FN68PT") == 0) {
+ /* On the Shuttle SN68PT, FAN_CTL2 is apparently not
+ connected to a fan, but to something else. One user
+ has reported instant system power-off when changing
+ the PWM2 duty cycle, so we disable it.
+ I use the board name string as the trigger in case
+ the same board is ever used in other systems. */
+ pr_info("it87: Disabling pwm2 due to "
+ "hardware constraints\n");
+ sio_data->skip_pwm = (1 << 1);
+ }
+ }
+
exit:
superio_exit();
return err;
@@ -1168,25 +1191,33 @@ static int __devinit it87_probe(struct platform_device *pdev)
}
if (enable_pwm_interface) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_pwm1_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm2_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm3_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm1.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm2.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm3.dev_attr))
- || (err = device_create_file(dev,
- &dev_attr_pwm1_freq))
- || (err = device_create_file(dev,
- &dev_attr_pwm2_freq))
- || (err = device_create_file(dev,
- &dev_attr_pwm3_freq)))
- goto ERROR4;
+ if (!(sio_data->skip_pwm & (1 << 0))) {
+ if ((err = device_create_file(dev,
+ &sensor_dev_attr_pwm1_enable.dev_attr))
+ || (err = device_create_file(dev,
+ &sensor_dev_attr_pwm1.dev_attr))
+ || (err = device_create_file(dev,
+ &dev_attr_pwm1_freq)))
+ goto ERROR4;
+ }
+ if (!(sio_data->skip_pwm & (1 << 1))) {
+ if ((err = device_create_file(dev,
+ &sensor_dev_attr_pwm2_enable.dev_attr))
+ || (err = device_create_file(dev,
+ &sensor_dev_attr_pwm2.dev_attr))
+ || (err = device_create_file(dev,
+ &dev_attr_pwm2_freq)))
+ goto ERROR4;
+ }
+ if (!(sio_data->skip_pwm & (1 << 2))) {
+ if ((err = device_create_file(dev,
+ &sensor_dev_attr_pwm3_enable.dev_attr))
+ || (err = device_create_file(dev,
+ &sensor_dev_attr_pwm3.dev_attr))
+ || (err = device_create_file(dev,
+ &dev_attr_pwm3_freq)))
+ goto ERROR4;
+ }
}
if (data->type == it8712 || data->type == it8716
@@ -1546,6 +1577,7 @@ static int __init sm_it87_init(void)
unsigned short isa_address=0;
struct it87_sio_data sio_data;
+ memset(&sio_data, 0, sizeof(struct it87_sio_data));
err = it87_find(&isa_address, &sio_data);
if (err)
return err;
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 22f6d5c00d8..0e7b1c6724a 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -180,7 +180,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = {
};
-static int i2c_powermac_remove(struct platform_device *dev)
+static int __devexit i2c_powermac_remove(struct platform_device *dev)
{
struct i2c_adapter *adapter = platform_get_drvdata(dev);
struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter);
@@ -200,7 +200,7 @@ static int i2c_powermac_remove(struct platform_device *dev)
}
-static int __devexit i2c_powermac_probe(struct platform_device *dev)
+static int __devinit i2c_powermac_probe(struct platform_device *dev)
{
struct pmac_i2c_bus *bus = dev->dev.platform_data;
struct device_node *parent = NULL;
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index af4491fa7e3..307d976c9b6 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -583,8 +583,10 @@ static int __init i2c_dev_init(void)
goto out;
i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
- if (IS_ERR(i2c_dev_class))
+ if (IS_ERR(i2c_dev_class)) {
+ res = PTR_ERR(i2c_dev_class);
goto out_unreg_chrdev;
+ }
res = i2c_add_driver(&i2cdev_driver);
if (res)
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index fc735ab08ff..052879a6f85 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -292,6 +292,20 @@ config IDE_GENERIC
tristate "generic/default IDE chipset support"
depends on ALPHA || X86 || IA64 || M32R || MIPS
help
+ This is the generic IDE driver. This driver attaches to the
+ fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and
+ so on). Please note that if this driver is built into the
+ kernel or loaded before other ATA (IDE or libata) drivers
+ and the controller is located at legacy ports, this driver
+ may grab those ports and thus can prevent the controller
+ specific driver from attaching.
+
+ Also, currently, IDE generic doesn't allow IRQ sharing
+ meaning that the IRQs it grabs won't be available to other
+ controllers sharing those IRQs which usually makes drivers
+ for those controllers fail. Generally, it's not a good idea
+ to load IDE generic driver on modern systems.
+
If unsure, say N.
config BLK_DEV_PLATFORM
@@ -766,10 +780,6 @@ config BLK_DEV_IDEDMA_PMAC
to transfer data to and from memory. Saying Y is safe and improves
performance.
-config BLK_DEV_IDE_SWARM
- tristate "IDE for Sibyte evaluation boards"
- depends on SIBYTE_SB1xxx_SOC
-
config BLK_DEV_IDE_AU1XXX
bool "IDE for AMD Alchemy Au1200"
depends on SOC_AU1200
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 49a8c589e34..03c2cb6a58b 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1113,7 +1113,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
if (write) {
/* disk has become write protected */
- if (cd->disk->policy) {
+ if (get_disk_ro(cd->disk)) {
cdrom_end_request(drive, 0);
return ide_stopped;
}
@@ -1661,7 +1661,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
cdi->mask &= ~CDC_PLAY_AUDIO;
mechtype = buf[8 + 6] >> 5;
- if (mechtype == mechtype_caddy || mechtype == mechtype_popup)
+ if (mechtype == mechtype_caddy ||
+ mechtype == mechtype_popup ||
+ (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE))
cdi->mask |= CDC_CLOSE_TRAY;
if (cdi->sanyo_slot > 0) {
@@ -1859,6 +1861,8 @@ static const struct cd_list_entry ide_cd_quirks_list[] = {
{ "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
{ "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
{ "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
+ { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE },
{ NULL, NULL, 0 }
};
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 07ef88bd109..33ea8c04871 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -41,6 +41,12 @@
#include <asm/io.h>
#include <asm/div64.h>
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define IDE_DISK_MINORS (1 << PARTN_BITS)
+#else
+#define IDE_DISK_MINORS 0
+#endif
+
struct ide_disk_obj {
ide_drive_t *drive;
ide_driver_t *driver;
@@ -1151,8 +1157,7 @@ static int ide_disk_probe(ide_drive_t *drive)
if (!idkp)
goto failed;
- g = alloc_disk_node(1 << PARTN_BITS,
- hwif_to_node(drive->hwif));
+ g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
if (!g)
goto out_free_idkp;
@@ -1178,9 +1183,11 @@ static int ide_disk_probe(ide_drive_t *drive)
} else
drive->attach = 1;
- g->minors = 1 << PARTN_BITS;
+ g->minors = IDE_DISK_MINORS;
g->driverfs_dev = &drive->gendev;
- g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0;
+ g->flags |= GENHD_FL_EXT_DEVT;
+ if (drive->removable)
+ g->flags |= GENHD_FL_REMOVABLE;
set_capacity(g, idedisk_capacity(drive));
g->fops = &idedisk_ops;
add_disk(g);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index adc68275585..3fa07c0aeaa 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -211,7 +211,7 @@ int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
xcount = bcount & 0xffff;
if (is_trm290)
xcount = ((xcount >> 2) - 1) << 16;
- if (xcount == 0x0000) {
+ else if (xcount == 0x0000) {
/*
* Most chipsets correctly interpret a length of 0x0000 as 64KB,
* but at least one (e.g. CS5530) misinterprets it as zero (!).
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 994e41099b4..70aa86c8807 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1188,7 +1188,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
struct gendisk *p = data;
*part &= (1 << PARTN_BITS) - 1;
- return &p->dev.kobj;
+ return &disk_to_dev(p)->kobj;
}
static int exact_lock(dev_t dev, void *data)
@@ -1492,7 +1492,7 @@ static struct device_attribute *ide_port_attrs[] = {
static int ide_sysfs_register_port(ide_hwif_t *hwif)
{
- int i, rc;
+ int i, uninitialized_var(rc);
for (i = 0; ide_port_attrs[i]; i++) {
rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 1bce84b5663..3833189144e 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2338,7 +2338,7 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
struct ide_atapi_pc pc;
- char fw_rev[6], vendor_id[10], product_id[18];
+ char fw_rev[4], vendor_id[8], product_id[16];
idetape_create_inquiry_cmd(&pc);
if (idetape_queue_pc_tail(drive, &pc)) {
@@ -2350,11 +2350,11 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
memcpy(product_id, &pc.buf[16], 16);
memcpy(fw_rev, &pc.buf[32], 4);
- ide_fixstring(vendor_id, 10, 0);
- ide_fixstring(product_id, 18, 0);
- ide_fixstring(fw_rev, 6, 0);
+ ide_fixstring(vendor_id, 8, 0);
+ ide_fixstring(product_id, 16, 0);
+ ide_fixstring(fw_rev, 4, 0);
- printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
+ printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
drive->name, tape->name, vendor_id, product_id, fw_rev);
}
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
index 677c7b2bac9..5873fa0b876 100644
--- a/drivers/ide/mips/Makefile
+++ b/drivers/ide/mips/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_BLK_DEV_IDE_SWARM) += swarm.o
obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
deleted file mode 100644
index badf79fc9e3..00000000000
--- a/drivers/ide/mips/swarm.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
- * Copyright (C) 2004 MontaVista Software Inc.
- * Author: Manish Lachwani, mlachwani@mvista.com
- * Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
- * Copyright (c) 2006, 2008 Maciej W. Rozycki
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- * Derived loosely from ide-pmac.c, so:
- * Copyright (C) 1998 Paul Mackerras.
- * Copyright (C) 1995-1998 Mark Lord
- */
-
-/*
- * Boards with SiByte processors so far have supported IDE devices via
- * the Generic Bus, PCI bus, and built-in PCMCIA interface. In all
- * cases, byte-swapping must be avoided for these devices (whereas
- * other PCI devices, for example, will require swapping). Any
- * SiByte-targetted kernel including IDE support will include this
- * file. Probing of a Generic Bus for an IDE device is controlled by
- * the definition of "SIBYTE_HAVE_IDE", which is provided by
- * <asm/sibyte/board.h> for Broadcom boards.
- */
-
-#include <linux/ide.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-
-#include <asm/sibyte/board.h>
-#include <asm/sibyte/sb1250_genbus.h>
-#include <asm/sibyte/sb1250_regs.h>
-
-#define DRV_NAME "ide-swarm"
-
-static char swarm_ide_string[] = DRV_NAME;
-
-static struct resource swarm_ide_resource = {
- .name = "SWARM GenBus IDE",
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device *swarm_ide_dev;
-
-static const struct ide_port_info swarm_port_info = {
- .name = DRV_NAME,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA,
-};
-
-/*
- * swarm_ide_probe - if the board header indicates the existence of
- * Generic Bus IDE, allocate a HWIF for it.
- */
-static int __devinit swarm_ide_probe(struct device *dev)
-{
- u8 __iomem *base;
- struct ide_host *host;
- phys_t offset, size;
- int i, rc;
- hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
-
- if (!SIBYTE_HAVE_IDE)
- return -ENODEV;
-
- base = ioremap(A_IO_EXT_BASE, 0x800);
- offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
- size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
- iounmap(base);
-
- offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
- size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
- if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
- printk(KERN_INFO DRV_NAME
- ": IDE interface at GenBus disabled\n");
- return -EBUSY;
- }
-
- printk(KERN_INFO DRV_NAME ": IDE interface at GenBus slot %i\n",
- IDE_CS);
-
- swarm_ide_resource.start = offset;
- swarm_ide_resource.end = offset + size - 1;
- if (request_resource(&iomem_resource, &swarm_ide_resource)) {
- printk(KERN_ERR DRV_NAME
- ": can't request I/O memory resource\n");
- return -EBUSY;
- }
-
- base = ioremap(offset, size);
-
- for (i = 0; i <= 7; i++)
- hw.io_ports_array[i] =
- (unsigned long)(base + ((0x1f0 + i) << 5));
- hw.io_ports.ctl_addr =
- (unsigned long)(base + (0x3f6 << 5));
- hw.irq = K_INT_GB_IDE;
- hw.chipset = ide_generic;
-
- rc = ide_host_add(&swarm_port_info, hws, &host);
- if (rc)
- goto err;
-
- dev_set_drvdata(dev, host);
-
- return 0;
-err:
- release_resource(&swarm_ide_resource);
- iounmap(base);
- return rc;
-}
-
-static struct device_driver swarm_ide_driver = {
- .name = swarm_ide_string,
- .bus = &platform_bus_type,
- .probe = swarm_ide_probe,
-};
-
-static void swarm_ide_platform_release(struct device *device)
-{
- struct platform_device *pldev;
-
- /* free device */
- pldev = to_platform_device(device);
- kfree(pldev);
-}
-
-static int __devinit swarm_ide_init_module(void)
-{
- struct platform_device *pldev;
- int err;
-
- printk(KERN_INFO "SWARM IDE driver\n");
-
- if (driver_register(&swarm_ide_driver)) {
- printk(KERN_ERR "Driver registration failed\n");
- err = -ENODEV;
- goto out;
- }
-
- if (!(pldev = kzalloc(sizeof (*pldev), GFP_KERNEL))) {
- err = -ENOMEM;
- goto out_unregister_driver;
- }
-
- pldev->name = swarm_ide_string;
- pldev->id = 0;
- pldev->dev.release = swarm_ide_platform_release;
-
- if (platform_device_register(pldev)) {
- err = -ENODEV;
- goto out_free_pldev;
- }
-
- if (!pldev->dev.driver) {
- /*
- * The driver was not bound to this device, there was
- * no hardware at this address. Unregister it, as the
- * release fuction will take care of freeing the
- * allocated structure
- */
- platform_device_unregister (pldev);
- }
-
- swarm_ide_dev = pldev;
-
- return 0;
-
-out_free_pldev:
- kfree(pldev);
-
-out_unregister_driver:
- driver_unregister(&swarm_ide_driver);
-out:
- return err;
-}
-
-module_init(swarm_ide_init_module);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 922d35f4fc0..3cab0cedfca 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3748,6 +3748,7 @@ error1:
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
+ kfree(cm_dev);
}
static void cm_remove_one(struct ib_device *ib_device)
@@ -3776,6 +3777,7 @@ static void cm_remove_one(struct ib_device *ib_device)
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
+ kfree(cm_dev);
}
static int __init ib_cm_init(void)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1adf2efd3cb..49c45feccd5 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1697,9 +1697,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
u8 port_num = mad_agent_priv->agent.port_num;
u8 lmc;
- send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
- mad_hdr.method & IB_MGMT_METHOD_RESP;
- rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
+ send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
+ rcv_resp = ib_response_mad(rwc->recv_buf.mad);
if (send_resp == rcv_resp)
/* both requests, or both responses. GIDs different */
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 2acf9b62cf9..69580e282af 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -272,7 +272,6 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
pr_debug("%s: Invalid QP type: %d\n", __func__,
init_attr->qp_type);
return ERR_PTR(-EINVAL);
- break;
}
if (err) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index eb778bfd6f6..ecff9804358 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1155,13 +1155,11 @@ static int iwch_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
PDBG("%s ibdev %p\n", __func__, ibdev);
+
+ memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- props->lid = 0;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
+ props->active_mtu = IB_MTU_2048;
props->state = IB_PORT_ACTIVE;
- props->phys_state = 0;
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP |
@@ -1170,7 +1168,6 @@ static int iwch_query_port(struct ib_device *ibdev,
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
props->active_width = 2;
props->active_speed = 2;
props->max_msg_sz = -1;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1ab919f836a..5d7b7855afb 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -164,6 +164,13 @@ struct ehca_qmap_entry {
u16 reported;
};
+struct ehca_queue_map {
+ struct ehca_qmap_entry *map;
+ unsigned int entries;
+ unsigned int tail;
+ unsigned int left_to_poll;
+};
+
struct ehca_qp {
union {
struct ib_qp ib_qp;
@@ -173,8 +180,9 @@ struct ehca_qp {
enum ehca_ext_qp_type ext_type;
enum ib_qp_state state;
struct ipz_queue ipz_squeue;
- struct ehca_qmap_entry *sq_map;
+ struct ehca_queue_map sq_map;
struct ipz_queue ipz_rqueue;
+ struct ehca_queue_map rq_map;
struct h_galpas galpas;
u32 qkey;
u32 real_qp_num;
@@ -204,6 +212,8 @@ struct ehca_qp {
atomic_t nr_events; /* events seen */
wait_queue_head_t wait_completion;
int mig_armed;
+ struct list_head sq_err_node;
+ struct list_head rq_err_node;
};
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -233,6 +243,8 @@ struct ehca_cq {
/* mmap counter for resources mapped into user space */
u32 mm_count_queue;
u32 mm_count_galpa;
+ struct list_head sqp_err_list;
+ struct list_head rqp_err_list;
};
enum ehca_mr_flag {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 5540b276a33..33647a95eb9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -276,6 +276,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
for (i = 0; i < QP_HASHTAB_LEN; i++)
INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
+ INIT_LIST_HEAD(&my_cq->sqp_err_list);
+ INIT_LIST_HEAD(&my_cq->rqp_err_list);
+
if (context) {
struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
struct ehca_create_cq_resp resp;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index a8a2ea585d2..8f7f282ead6 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -197,6 +197,8 @@ void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
enum ib_rate path_rate, u32 *ipd);
+void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
+
#ifdef CONFIG_PPC_64K_PAGES
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b6bcee03673..4dbe2870e01 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -396,6 +396,50 @@ static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
queue->is_small = (queue->page_size != 0);
}
+/* needs to be called with cq->spinlock held */
+void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
+{
+ struct list_head *list, *node;
+
+ /* TODO: support low latency QPs */
+ if (qp->ext_type == EQPT_LLQP)
+ return;
+
+ if (on_sq) {
+ list = &qp->send_cq->sqp_err_list;
+ node = &qp->sq_err_node;
+ } else {
+ list = &qp->recv_cq->rqp_err_list;
+ node = &qp->rq_err_node;
+ }
+
+ if (list_empty(node))
+ list_add_tail(node, list);
+
+ return;
+}
+
+static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->spinlock, flags);
+
+ if (!list_empty(node))
+ list_del_init(node);
+
+ spin_unlock_irqrestore(&cq->spinlock, flags);
+}
+
+static void reset_queue_map(struct ehca_queue_map *qmap)
+{
+ int i;
+
+ qmap->tail = 0;
+ for (i = 0; i < qmap->entries; i++)
+ qmap->map[i].reported = 1;
+}
+
/*
* Create an ib_qp struct that is either a QP or an SRQ, depending on
* the value of the is_srq parameter. If init_attr and srq_init_attr share
@@ -407,12 +451,11 @@ static struct ehca_qp *internal_create_qp(
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata, int is_srq)
{
- struct ehca_qp *my_qp;
+ struct ehca_qp *my_qp, *my_srq = NULL;
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct ib_ucontext *context = NULL;
- u32 nr_qes;
u64 h_ret;
int is_llqp = 0, has_srq = 0;
int qp_type, max_send_sge, max_recv_sge, ret;
@@ -457,8 +500,7 @@ static struct ehca_qp *internal_create_qp(
/* handle SRQ base QPs */
if (init_attr->srq) {
- struct ehca_qp *my_srq =
- container_of(init_attr->srq, struct ehca_qp, ib_srq);
+ my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
has_srq = 1;
parms.ext_type = EQPT_SRQBASE;
@@ -716,15 +758,19 @@ static struct ehca_qp *internal_create_qp(
"and pages ret=%i", ret);
goto create_qp_exit2;
}
- nr_qes = my_qp->ipz_squeue.queue_length /
+
+ my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
my_qp->ipz_squeue.qe_size;
- my_qp->sq_map = vmalloc(nr_qes *
+ my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
sizeof(struct ehca_qmap_entry));
- if (!my_qp->sq_map) {
+ if (!my_qp->sq_map.map) {
ehca_err(pd->device, "Couldn't allocate squeue "
"map ret=%i", ret);
goto create_qp_exit3;
}
+ INIT_LIST_HEAD(&my_qp->sq_err_node);
+ /* to avoid the generation of bogus flush CQEs */
+ reset_queue_map(&my_qp->sq_map);
}
if (HAS_RQ(my_qp)) {
@@ -736,6 +782,25 @@ static struct ehca_qp *internal_create_qp(
"and pages ret=%i", ret);
goto create_qp_exit4;
}
+
+ my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+ my_qp->ipz_rqueue.qe_size;
+ my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+ sizeof(struct ehca_qmap_entry));
+ if (!my_qp->rq_map.map) {
+ ehca_err(pd->device, "Couldn't allocate rqueue "
+ "map ret=%i", ret);
+ goto create_qp_exit5;
+ }
+ INIT_LIST_HEAD(&my_qp->rq_err_node);
+ /* to avoid the generation of bogus flush CQEs */
+ reset_queue_map(&my_qp->rq_map);
+ } else if (init_attr->srq) {
+ /* this is a base QP, use the queue map of the SRQ */
+ my_qp->rq_map = my_srq->rq_map;
+ INIT_LIST_HEAD(&my_qp->rq_err_node);
+
+ my_qp->ipz_rqueue = my_srq->ipz_rqueue;
}
if (is_srq) {
@@ -799,7 +864,7 @@ static struct ehca_qp *internal_create_qp(
if (ret) {
ehca_err(pd->device,
"Couldn't assign qp to send_cq ret=%i", ret);
- goto create_qp_exit6;
+ goto create_qp_exit7;
}
}
@@ -825,25 +890,29 @@ static struct ehca_qp *internal_create_qp(
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
- goto create_qp_exit7;
+ goto create_qp_exit8;
}
}
return my_qp;
-create_qp_exit7:
+create_qp_exit8:
ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-create_qp_exit6:
+create_qp_exit7:
kfree(my_qp->mod_qp_parm);
+create_qp_exit6:
+ if (HAS_RQ(my_qp))
+ vfree(my_qp->rq_map.map);
+
create_qp_exit5:
if (HAS_RQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
create_qp_exit4:
if (HAS_SQ(my_qp))
- vfree(my_qp->sq_map);
+ vfree(my_qp->sq_map.map);
create_qp_exit3:
if (HAS_SQ(my_qp))
@@ -1035,6 +1104,101 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
return 0;
}
+static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
+ struct ehca_queue_map *qmap)
+{
+ void *wqe_v;
+ u64 q_ofs;
+ u32 wqe_idx;
+
+ /* convert real to abs address */
+ wqe_p = wqe_p & (~(1UL << 63));
+
+ wqe_v = abs_to_virt(wqe_p);
+
+ if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
+ ehca_gen_err("Invalid offset for calculating left cqes "
+ "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+ return -EFAULT;
+ }
+
+ wqe_idx = q_ofs / ipz_queue->qe_size;
+ if (wqe_idx < qmap->tail)
+ qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
+ else
+ qmap->left_to_poll = wqe_idx - qmap->tail;
+
+ return 0;
+}
+
+static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
+{
+ u64 h_ret;
+ void *send_wqe_p, *recv_wqe_p;
+ int ret;
+ unsigned long flags;
+ int qp_num = my_qp->ib_qp.qp_num;
+
+ /* this hcall is not supported on base QPs */
+ if (my_qp->ext_type != EQPT_SRQBASE) {
+ /* get send and receive wqe pointer */
+ h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle, &my_qp->pf,
+ &send_wqe_p, &recv_wqe_p, 4);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "disable_and_get_wqe() "
+ "failed ehca_qp=%p qp_num=%x h_ret=%li",
+ my_qp, qp_num, h_ret);
+ return ehca2ib_return_code(h_ret);
+ }
+
+ /*
+ * acquire lock to ensure that nobody is polling the cq which
+ * could mean that the qmap->tail pointer is in an
+ * inconsistent state.
+ */
+ spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+ ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
+ &my_qp->sq_map);
+ spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+ if (ret)
+ return ret;
+
+
+ spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+ ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
+ &my_qp->rq_map);
+ spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
+ if (ret)
+ return ret;
+ } else {
+ spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+ my_qp->sq_map.left_to_poll = 0;
+ spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+
+ spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+ my_qp->rq_map.left_to_poll = 0;
+ spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
+ }
+
+ /* this ensures that flush CQEs are generated only for pending WQEs */
+ if ((my_qp->sq_map.left_to_poll == 0) &&
+ (my_qp->rq_map.left_to_poll == 0)) {
+ spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+ ehca_add_to_err_list(my_qp, 1);
+ spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+
+ if (HAS_RQ(my_qp)) {
+ spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+ ehca_add_to_err_list(my_qp, 0);
+ spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
+ flags);
+ }
+ }
+
+ return 0;
+}
+
/*
* internal_modify_qp with circumvention to handle aqp0 properly
* smi_reset2init indicates if this is an internal reset-to-init-call for
@@ -1539,10 +1703,27 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit2;
}
}
+ if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+ ret = check_for_left_cqes(my_qp, shca);
+ if (ret)
+ goto modify_qp_exit2;
+ }
if (statetrans == IB_QPST_ANY2RESET) {
ipz_qeit_reset(&my_qp->ipz_rqueue);
ipz_qeit_reset(&my_qp->ipz_squeue);
+
+ if (qp_cur_state == IB_QPS_ERR) {
+ del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
+
+ if (HAS_RQ(my_qp))
+ del_from_err_list(my_qp->recv_cq,
+ &my_qp->rq_err_node);
+ }
+ reset_queue_map(&my_qp->sq_map);
+
+ if (HAS_RQ(my_qp))
+ reset_queue_map(&my_qp->rq_map);
}
if (attr_mask & IB_QP_QKEY)
@@ -1958,6 +2139,16 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
idr_remove(&ehca_qp_idr, my_qp->token);
write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ /*
+ * SRQs will never get into an error list and do not have a recv_cq,
+ * so we need to skip them here.
+ */
+ if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+ del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
+
+ if (HAS_SQ(my_qp))
+ del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
+
/* now wait until all pending events have completed */
wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
@@ -1983,7 +2174,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
if (qp_type == IB_QPT_GSI) {
struct ib_event event;
ehca_info(dev, "device %s: port %x is inactive.",
- shca->ib_device.name, port_num);
+ shca->ib_device.name, port_num);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port_num;
@@ -1991,11 +2182,15 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
ib_dispatch_event(&event);
}
- if (HAS_RQ(my_qp))
+ if (HAS_RQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+
+ vfree(my_qp->rq_map.map);
+ }
if (HAS_SQ(my_qp)) {
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
- vfree(my_qp->sq_map);
+
+ vfree(my_qp->sq_map.map);
}
kmem_cache_free(qp_cache, my_qp);
atomic_dec(&shca->num_qps);
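A standalone sketch of the wraparound arithmetic that calc_left_cqes() above uses to count the WQEs still awaiting completions between the software tail and the position reported by firmware; the queue size and indices below are illustrative.

/* Sketch of the circular-queue arithmetic used when counting outstanding WQEs. */
#include <stdio.h>

/* Entries between the software tail and the hardware position, with wraparound. */
static unsigned int left_to_poll(unsigned int wqe_idx, unsigned int tail,
				 unsigned int entries)
{
	if (wqe_idx < tail)
		return (entries - tail) + wqe_idx;
	return wqe_idx - tail;
}

int main(void)
{
	/* 16-entry queue: tail at 14, hardware stopped at index 3 -> 5 left. */
	printf("%u\n", left_to_poll(3, 14, 16));
	/* no wraparound: tail at 2, hardware at 7 -> 5 left */
	printf("%u\n", left_to_poll(7, 2, 16));
	return 0;
}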
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 4426d82fe79..64928079eaf 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -53,9 +53,25 @@
/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
+static u64 replace_wr_id(u64 wr_id, u16 idx)
+{
+ u64 ret;
+
+ ret = wr_id & ~QMAP_IDX_MASK;
+ ret |= idx & QMAP_IDX_MASK;
+
+ return ret;
+}
+
+static u16 get_app_wr_id(u64 wr_id)
+{
+ return wr_id & QMAP_IDX_MASK;
+}
+
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
struct ehca_wqe *wqe_p,
- struct ib_recv_wr *recv_wr)
+ struct ib_recv_wr *recv_wr,
+ u32 rq_map_idx)
{
u8 cnt_ds;
if (unlikely((recv_wr->num_sge < 0) ||
@@ -69,7 +85,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
- wqe_p->work_request_id = recv_wr->wr_id;
+ wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
wqe_p->nr_of_data_seg = recv_wr->num_sge;
for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
@@ -146,6 +162,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
u64 dma_length;
struct ehca_av *my_av;
u32 remote_qkey = send_wr->wr.ud.remote_qkey;
+ struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
if (unlikely((send_wr->num_sge < 0) ||
(send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
@@ -158,11 +175,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
- wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
- wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+ wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
- qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
- qp->sq_map[sq_map_idx].reported = 0;
+ qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
+ qmap_entry->reported = 0;
switch (send_wr->opcode) {
case IB_WR_SEND:
@@ -496,7 +512,9 @@ static int internal_post_recv(struct ehca_qp *my_qp,
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
+ u32 rq_map_idx;
unsigned long flags;
+ struct ehca_qmap_entry *qmap_entry;
if (unlikely(!HAS_RQ(my_qp))) {
ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
@@ -524,8 +542,15 @@ static int internal_post_recv(struct ehca_qp *my_qp,
}
goto post_recv_exit0;
}
+ /*
+ * Get the index of the WQE in the recv queue. The same index
+ * is used for writing into the rq_map.
+ */
+ rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
+
/* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
+ rq_map_idx);
/*
* if something failed,
* reset the free entry pointer to the start value
@@ -540,6 +565,11 @@ static int internal_post_recv(struct ehca_qp *my_qp,
}
goto post_recv_exit0;
}
+
+ qmap_entry = &my_qp->rq_map.map[rq_map_idx];
+ qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
+ qmap_entry->reported = 0;
+
wqe_cnt++;
} /* eof for cur_recv_wr */
@@ -596,10 +626,12 @@ static const u8 ib_wc_opcode[255] = {
/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
- int ret = 0;
+ int ret = 0, qmap_tail_idx;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
struct ehca_qp *my_qp;
+ struct ehca_qmap_entry *qmap_entry;
+ struct ehca_queue_map *qmap;
int cqe_count = 0, is_error;
repoll:
@@ -674,27 +706,52 @@ repoll:
goto repoll;
wc->qp = &my_qp->ib_qp;
- if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
- struct ehca_qmap_entry *qmap_entry;
+ if (is_error) {
/*
- * We got a send completion and need to restore the original
- * wr_id.
+ * set left_to_poll to 0 because in error state, we will not
+ * get any additional CQEs
*/
- qmap_entry = &my_qp->sq_map[cqe->work_request_id &
- QMAP_IDX_MASK];
+ ehca_add_to_err_list(my_qp, 1);
+ my_qp->sq_map.left_to_poll = 0;
- if (qmap_entry->reported) {
- ehca_warn(cq->device, "Double cqe on qp_num=%#x",
- my_qp->real_qp_num);
- /* found a double cqe, discard it and read next one */
- goto repoll;
- }
- wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
- wc->wr_id |= qmap_entry->app_wr_id;
- qmap_entry->reported = 1;
- } else
+ if (HAS_RQ(my_qp))
+ ehca_add_to_err_list(my_qp, 0);
+ my_qp->rq_map.left_to_poll = 0;
+ }
+
+ qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+ if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+ /* We got a send completion. */
+ qmap = &my_qp->sq_map;
+ else
/* We got a receive completion. */
- wc->wr_id = cqe->work_request_id;
+ qmap = &my_qp->rq_map;
+
+ qmap_entry = &qmap->map[qmap_tail_idx];
+ if (qmap_entry->reported) {
+ ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+ my_qp->real_qp_num);
+ /* found a double cqe, discard it and read next one */
+ goto repoll;
+ }
+
+ wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
+ qmap_entry->reported = 1;
+
+ /* this is a proper completion, we need to advance the tail pointer */
+ if (++qmap->tail == qmap->entries)
+ qmap->tail = 0;
+
+ /* if left_to_poll is decremented to 0, add the QP to the error list */
+ if (qmap->left_to_poll > 0) {
+ qmap->left_to_poll--;
+ if ((my_qp->sq_map.left_to_poll == 0) &&
+ (my_qp->rq_map.left_to_poll == 0)) {
+ ehca_add_to_err_list(my_qp, 1);
+ if (HAS_RQ(my_qp))
+ ehca_add_to_err_list(my_qp, 0);
+ }
+ }
/* eval ib_wc_opcode */
wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -733,13 +790,88 @@ poll_cq_one_exit0:
return ret;
}
+static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
+ struct ib_wc *wc, int num_entries,
+ struct ipz_queue *ipz_queue, int on_sq)
+{
+ int nr = 0;
+ struct ehca_wqe *wqe;
+ u64 offset;
+ struct ehca_queue_map *qmap;
+ struct ehca_qmap_entry *qmap_entry;
+
+ if (on_sq)
+ qmap = &my_qp->sq_map;
+ else
+ qmap = &my_qp->rq_map;
+
+ qmap_entry = &qmap->map[qmap->tail];
+
+ while ((nr < num_entries) && (qmap_entry->reported == 0)) {
+ /* generate flush CQE */
+ memset(wc, 0, sizeof(*wc));
+
+ offset = qmap->tail * ipz_queue->qe_size;
+ wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
+ if (!wqe) {
+ ehca_err(cq->device, "Invalid wqe offset=%#lx on "
+ "qp_num=%#x", offset, my_qp->real_qp_num);
+ return nr;
+ }
+
+ wc->wr_id = replace_wr_id(wqe->work_request_id,
+ qmap_entry->app_wr_id);
+
+ if (on_sq) {
+ switch (wqe->optype) {
+ case WQE_OPTYPE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case WQE_OPTYPE_RDMAWRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case WQE_OPTYPE_RDMAREAD:
+ wc->opcode = IB_WC_RDMA_READ;
+ break;
+ default:
+ ehca_err(cq->device, "Invalid optype=%x",
+ wqe->optype);
+ return nr;
+ }
+ } else
+ wc->opcode = IB_WC_RECV;
+
+ if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
+ wc->ex.imm_data = wqe->immediate_data;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ }
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+
+ wc->qp = &my_qp->ib_qp;
+
+ /* mark as reported and advance tail pointer */
+ qmap_entry->reported = 1;
+ if (++qmap->tail == qmap->entries)
+ qmap->tail = 0;
+ qmap_entry = &qmap->map[qmap->tail];
+
+ wc++; nr++;
+ }
+
+ return nr;
+
+}
+
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
int nr;
+ struct ehca_qp *err_qp;
struct ib_wc *current_wc = wc;
int ret = 0;
unsigned long flags;
+ int entries_left = num_entries;
if (num_entries < 1) {
ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -749,15 +881,40 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
}
spin_lock_irqsave(&my_cq->spinlock, flags);
- for (nr = 0; nr < num_entries; nr++) {
+
+ /* generate flush cqes for send queues */
+ list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
+ nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
+ &err_qp->ipz_squeue, 1);
+ entries_left -= nr;
+ current_wc += nr;
+
+ if (entries_left == 0)
+ break;
+ }
+
+ /* generate flush cqes for receive queues */
+ list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
+ nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
+ &err_qp->ipz_rqueue, 0);
+ entries_left -= nr;
+ current_wc += nr;
+
+ if (entries_left == 0)
+ break;
+ }
+
+ for (nr = 0; nr < entries_left; nr++) {
ret = ehca_poll_cq_one(cq, current_wc);
if (ret)
break;
current_wc++;
} /* eof for nr */
+ entries_left -= nr;
+
spin_unlock_irqrestore(&my_cq->spinlock, flags);
if (ret == -EAGAIN || !ret)
- ret = nr;
+ ret = num_entries - entries_left;
poll_cq_exit0:
return ret;
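A standalone sketch of the wr_id packing used above: the queue-map index rides in the low bits of the 64-bit work-request ID, and the application's low bits are restored from the map at completion time. The mask width below is an assumption for illustration; the driver's real QMAP_IDX_MASK is defined in its headers.

/* Sketch of stashing a queue-map index in the low bits of a 64-bit wr_id.
 * The mask width is an assumption; the driver defines the real QMAP_IDX_MASK. */
#include <stdint.h>
#include <stdio.h>

#define QMAP_IDX_MASK 0x7fffULL

static uint64_t replace_wr_id(uint64_t wr_id, uint16_t idx)
{
	return (wr_id & ~QMAP_IDX_MASK) | (idx & QMAP_IDX_MASK);
}

static uint16_t get_app_wr_id(uint64_t wr_id)
{
	return wr_id & QMAP_IDX_MASK;
}

int main(void)
{
	uint64_t app_wr_id = 0xabcdef0000001234ULL;	/* caller-supplied wr_id */
	uint16_t idx = 42;				/* slot in the queue map */

	/* post path: the index replaces the low bits on the wire */
	uint64_t on_wire = replace_wr_id(app_wr_id, idx);
	/* completion path: restore the application's low bits from the map */
	uint64_t restored = replace_wr_id(on_wire, get_app_wr_id(app_wr_id));

	printf("on wire %#llx (idx %u), restored %#llx, match %d\n",
	       (unsigned long long)on_wire, get_app_wr_id(on_wire),
	       (unsigned long long)restored, restored == app_wr_id);
	return 0;
}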
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 97710522624..7b93cda1a4b 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -675,7 +675,8 @@ static void send_rc_ack(struct ipath_qp *qp)
hdr.lrh[0] = cpu_to_be16(lrh0);
hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
+ hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
+ qp->remote_ah_attr.src_path_bits);
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index af051f75766..fc0f6d9e603 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -618,7 +618,8 @@ void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+ qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
+ qp->remote_ah_attr.src_path_bits);
bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index b766e40e9eb..eabc4247860 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -340,9 +340,16 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
int acc;
int ret;
unsigned long flags;
+ struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
spin_lock_irqsave(&qp->s_lock, flags);
+ if (qp->ibqp.qp_type != IB_QPT_SMI &&
+ !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+ ret = -ENETDOWN;
+ goto bail;
+ }
+
/* Check that state is OK to post send. */
if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
goto bail_inval;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f29dbb767e8..baa01deb243 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1058,6 +1058,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
else
sqd_event = 0;
+ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ context->rlkey |= (1 << 4);
+
/*
* Before passing a kernel QP to the HW, make sure that the
* ownership bits of the send queue are set and the SQ
@@ -1342,6 +1345,12 @@ static __be32 convert_access(int acc)
static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
{
struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+ int i;
+
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
+ wr->wr.fast_reg.page_list->page_list[i] =
+ cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cc440f90000..65ad359fdf1 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -149,18 +149,10 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
((pci_resource_len(dev->pdev, 0) - 1) &
dev->catas_err.addr);
- if (!request_mem_region(addr, dev->catas_err.size * 4,
- DRV_NAME)) {
- mthca_warn(dev, "couldn't request catastrophic error region "
- "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
- return;
- }
-
dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
if (!dev->catas_err.map) {
mthca_warn(dev, "couldn't map catastrophic error region "
"at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
- release_mem_region(addr, dev->catas_err.size * 4);
return;
}
@@ -175,13 +167,8 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
{
del_timer_sync(&dev->catas_err.timer);
- if (dev->catas_err.map) {
+ if (dev->catas_err.map)
iounmap(dev->catas_err.map);
- release_mem_region(pci_resource_start(dev->pdev, 0) +
- ((pci_resource_len(dev->pdev, 0) - 1) &
- dev->catas_err.addr),
- dev->catas_err.size * 4);
- }
spin_lock_irq(&catas_lock);
list_del(&dev->catas_err.list);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cc6858f0b65..28f0e0c40d7 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -652,27 +652,13 @@ static int mthca_map_reg(struct mthca_dev *dev,
{
unsigned long base = pci_resource_start(dev->pdev, 0);
- if (!request_mem_region(base + offset, size, DRV_NAME))
- return -EBUSY;
-
*map = ioremap(base + offset, size);
- if (!*map) {
- release_mem_region(base + offset, size);
+ if (!*map)
return -ENOMEM;
- }
return 0;
}
-static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
- unsigned long size, void __iomem *map)
-{
- unsigned long base = pci_resource_start(dev->pdev, 0);
-
- release_mem_region(base + offset, size);
- iounmap(map);
-}
-
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
@@ -699,9 +685,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
dev->fw.arbel.eq_arm_base) + 4, 4,
&dev->eq_regs.arbel.eq_arm)) {
mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
- mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
- dev->clr_base);
+ iounmap(dev->clr_base);
return -ENOMEM;
}
@@ -710,12 +694,8 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
MTHCA_EQ_SET_CI_SIZE,
&dev->eq_regs.arbel.eq_set_ci_base)) {
mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
- mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.eq_arm_base) + 4, 4,
- dev->eq_regs.arbel.eq_arm);
- mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
- dev->clr_base);
+ iounmap(dev->eq_regs.arbel.eq_arm);
+ iounmap(dev->clr_base);
return -ENOMEM;
}
} else {
@@ -731,8 +711,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
&dev->eq_regs.tavor.ecr_base)) {
mthca_err(dev, "Couldn't map ecr register, "
"aborting.\n");
- mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
- dev->clr_base);
+ iounmap(dev->clr_base);
return -ENOMEM;
}
}
@@ -744,22 +723,12 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
- mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.eq_set_ci_base,
- MTHCA_EQ_SET_CI_SIZE,
- dev->eq_regs.arbel.eq_set_ci_base);
- mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.eq_arm_base) + 4, 4,
- dev->eq_regs.arbel.eq_arm);
- mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
- dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
- dev->clr_base);
+ iounmap(dev->eq_regs.arbel.eq_set_ci_base);
+ iounmap(dev->eq_regs.arbel.eq_arm);
+ iounmap(dev->clr_base);
} else {
- mthca_unmap_reg(dev, MTHCA_ECR_BASE,
- MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
- dev->eq_regs.tavor.ecr_base);
- mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
- dev->clr_base);
+ iounmap(dev->eq_regs.tavor.ecr_base);
+ iounmap(dev->clr_base);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fb9f91b60f3..52f60f4eea0 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -921,58 +921,6 @@ err_uar_table_free:
return err;
}
-static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
-{
- int err;
-
- /*
- * We can't just use pci_request_regions() because the MSI-X
- * table is right in the middle of the first BAR. If we did
- * pci_request_region and grab all of the first BAR, then
- * setting up MSI-X would fail, since the PCI core wants to do
- * request_mem_region on the MSI-X vector table.
- *
- * So just request what we need right now, and request any
- * other regions we need when setting up EQs.
- */
- if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
- MTHCA_HCR_SIZE, DRV_NAME))
- return -EBUSY;
-
- err = pci_request_region(pdev, 2, DRV_NAME);
- if (err)
- goto err_bar2_failed;
-
- if (!ddr_hidden) {
- err = pci_request_region(pdev, 4, DRV_NAME);
- if (err)
- goto err_bar4_failed;
- }
-
- return 0;
-
-err_bar4_failed:
- pci_release_region(pdev, 2);
-
-err_bar2_failed:
- release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
- MTHCA_HCR_SIZE);
-
- return err;
-}
-
-static void mthca_release_regions(struct pci_dev *pdev,
- int ddr_hidden)
-{
- if (!ddr_hidden)
- pci_release_region(pdev, 4);
-
- pci_release_region(pdev, 2);
-
- release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
- MTHCA_HCR_SIZE);
-}
-
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
struct msix_entry entries[3];
@@ -1059,7 +1007,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
ddr_hidden = 1;
- err = mthca_request_regions(pdev, ddr_hidden);
+ err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Cannot obtain PCI resources, "
"aborting.\n");
@@ -1196,7 +1144,7 @@ err_free_dev:
ib_dealloc_device(&mdev->ib_dev);
err_free_res:
- mthca_release_regions(pdev, ddr_hidden);
+ pci_release_regions(pdev);
err_disable_pdev:
pci_disable_device(pdev);
@@ -1240,8 +1188,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
pci_disable_msix(pdev);
ib_dealloc_device(&mdev->ib_dev);
- mthca_release_regions(pdev, mdev->mthca_flags &
- MTHCA_FLAG_DDR_HIDDEN);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b0cab64e5e3..a2b04d62b1a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -70,27 +70,31 @@ int interrupt_mod_interval = 0;
/* Interoperability */
int mpa_version = 1;
-module_param(mpa_version, int, 0);
+module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)");
/* Interoperability */
int disable_mpa_crc = 0;
-module_param(disable_mpa_crc, int, 0);
+module_param(disable_mpa_crc, int, 0644);
MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
unsigned int send_first = 0;
-module_param(send_first, int, 0);
+module_param(send_first, int, 0644);
MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
unsigned int nes_drv_opt = 0;
-module_param(nes_drv_opt, int, 0);
+module_param(nes_drv_opt, int, 0644);
MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
unsigned int nes_debug_level = 0;
module_param_named(debug_level, nes_debug_level, uint, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug output level");
+unsigned int wqm_quanta = 0x10000;
+module_param(wqm_quanta, int, 0644);
+MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
+
LIST_HEAD(nes_adapter_list);
static LIST_HEAD(nes_dev_list);
@@ -557,12 +561,32 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
goto bail5;
}
nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+ nesdev->nesadapter->wqm_quanta = wqm_quanta;
/* nesdev->base_doorbell_index =
nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
nesdev->base_doorbell_index = 1;
nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
- nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
+ if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ switch (PCI_FUNC(nesdev->pcidev->devfn) %
+ nesdev->nesadapter->port_count) {
+ case 1:
+ nesdev->mac_index = 2;
+ break;
+ case 2:
+ nesdev->mac_index = 1;
+ break;
+ case 3:
+ nesdev->mac_index = 3;
+ break;
+ case 0:
+ default:
+ nesdev->mac_index = 0;
+ }
+ } else {
+ nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) %
+ nesdev->nesadapter->port_count;
+ }
tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
@@ -581,7 +605,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
(1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
- nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
+ nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24));
}
/* TODO: This really should be the first driver to load, not function 0 */
@@ -772,14 +796,14 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
- devfn = nesdev->nesadapter->devfn;
- bus_number = nesdev->nesadapter->bus_number;
+ devfn = nesdev->pcidev->devfn;
+ bus_number = nesdev->pcidev->bus->number;
break;
}
i++;
}
- return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn);
+ return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
}
static ssize_t nes_store_adapter(struct device_driver *ddp,
@@ -1050,6 +1074,55 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp,
return strnlen(buf, count);
}
+
+/**
+ * nes_show_wqm_quanta
+ */
+static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
+{
+ u32 wqm_quanta_value = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ wqm_quanta_value = nesdev->nesadapter->wqm_quanta;
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta);
+}
+
+
+/**
+ * nes_store_wqm_quanta
+ */
+static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ unsigned long wqm_quanta_value;
+ u32 wqm_config1;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ strict_strtoul(buf, 0, &wqm_quanta_value);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
+ wqm_config1 = nes_read_indexed(nesdev,
+ NES_IDX_WQM_CONFIG1);
+ nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
+ ((wqm_quanta_value << 1) |
+ (wqm_config1 & 0x00000001)));
+ break;
+ }
+ i++;
+ }
+ return strnlen(buf, count);
+}
+
static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
nes_show_adapter, nes_store_adapter);
static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
@@ -1068,6 +1141,8 @@ static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
nes_show_idx_addr, nes_store_idx_addr);
static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
nes_show_idx_data, nes_store_idx_data);
+static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR,
+ nes_show_wqm_quanta, nes_store_wqm_quanta);
static int nes_create_driver_sysfs(struct pci_driver *drv)
{
@@ -1081,6 +1156,7 @@ static int nes_create_driver_sysfs(struct pci_driver *drv)
error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
return error;
}
@@ -1095,6 +1171,7 @@ static void nes_remove_driver_sysfs(struct pci_driver *drv)
driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
driver_remove_file(&drv->driver, &driver_attr_idx_addr);
driver_remove_file(&drv->driver, &driver_attr_idx_data);
+ driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
}
/**
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 8eb7ae96974..1595dc7bba9 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -169,7 +169,7 @@ extern int disable_mpa_crc;
extern unsigned int send_first;
extern unsigned int nes_drv_opt;
extern unsigned int nes_debug_level;
-
+extern unsigned int wqm_quanta;
extern struct list_head nes_adapter_list;
extern atomic_t cm_connects;
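A standalone sketch of the read-modify-write that the wqm_quanta store handler and nes_init_csr_ne020() perform on NES_IDX_WQM_CONFIG1, assuming bit 0 is a control bit to preserve and bits 31:1 carry the quanta (consistent with the 0x00020001 value the init path used to write).

/* Sketch of the WQM_CONFIG1 update: place the quanta in bits 31:1 while
 * preserving bit 0 (assumed to be a control/enable bit, as in the init path). */
#include <stdint.h>
#include <stdio.h>

static uint32_t wqm_config1_with_quanta(uint32_t old_config1, uint32_t quanta)
{
	return (quanta << 1) | (old_config1 & 0x00000001);
}

int main(void)
{
	uint32_t reg = 0x00020001;			/* value the init path used to write */
	reg = wqm_config1_with_quanta(reg, 0x10000);	/* default wqm_quanta */
	printf("WQM_CONFIG1 = %#010x\n", reg);		/* -> 0x00020001 */
	return 0;
}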
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9f0b964b2c9..2caf9da81ad 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -52,7 +52,7 @@
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
-
+#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/ip_fib.h>
@@ -1019,23 +1019,43 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
/**
- * nes_addr_send_arp
+ * nes_addr_resolve_neigh
*/
-static void nes_addr_send_arp(u32 dst_ip)
+static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
{
struct rtable *rt;
struct flowi fl;
+ struct neighbour *neigh;
+ int rc = -1;
+ DECLARE_MAC_BUF(mac);
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
if (ip_route_output_key(&init_net, &rt, &fl)) {
printk("%s: ip_route_output_key failed for 0x%08X\n",
__func__, dst_ip);
- return;
+ return rc;
+ }
+
+ neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
+ if (neigh) {
+ if (neigh->nud_state & NUD_VALID) {
+ nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
+ " is %s, Gateway is 0x%08X \n", dst_ip,
+ print_mac(mac, neigh->ha), ntohl(rt->rt_gateway));
+ nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
+ dst_ip, NES_ARP_ADD);
+ rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
+ NES_ARP_RESOLVE);
+ }
+ neigh_release(neigh);
}
- neigh_event_send(rt->u.dst.neighbour, NULL);
+ if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
+ neigh_event_send(rt->u.dst.neighbour, NULL);
+
ip_rt_put(rt);
+ return rc;
}
@@ -1108,9 +1128,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
/* get the mac addr for the remote node */
arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
if (arpindex < 0) {
- kfree(cm_node);
- nes_addr_send_arp(cm_info->rem_addr);
- return NULL;
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
+ if (arpindex < 0) {
+ kfree(cm_node);
+ return NULL;
+ }
}
/* copy the mac addr to node context */
@@ -1826,7 +1848,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
/**
* mini_cm_connect - make a connection node with params
*/
-struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, u16 private_data_len,
void *private_data, struct nes_cm_info *cm_info)
{
@@ -1956,13 +1978,6 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
return ret;
cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_CLOSED;
- ret = send_fin(cm_node, NULL);
-
- if (cm_node->accept_pend) {
- BUG_ON(!cm_node->listener);
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
- }
ret = send_reset(cm_node, NULL);
return ret;
@@ -2014,7 +2029,6 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
ret = rem_ref_cm_node(cm_core, cm_node);
break;
}
- cm_node->cm_id = NULL;
return ret;
}
@@ -2383,6 +2397,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
+ issued_disconnect_reset = 1;
cm_event.status = IW_CM_EVENT_STATUS_RESET;
nes_debug(NES_DBG_CM, "Generating a CM "
"Disconnect Event (status reset) for "
@@ -2508,7 +2523,6 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
nes_debug(NES_DBG_CM, "Call close API\n");
g_cm_core->api->close(g_cm_core, nesqp->cm_node);
- nesqp->cm_node = NULL;
}
return ret;
@@ -2837,6 +2851,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->apbvt_set = 1;
nesqp->cm_node = cm_node;
cm_node->nesqp = nesqp;
+ nes_add_ref(&nesqp->ibqp);
return 0;
}
@@ -3167,7 +3182,6 @@ static void cm_event_connect_error(struct nes_cm_event *event)
if (ret)
printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
"ret=%d\n", __func__, __LINE__, ret);
- nes_rem_ref(&nesqp->ibqp);
cm_id->rem_ref(cm_id);
rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1513d4066f1..7c49cc882d7 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -55,18 +55,19 @@ u32 int_mod_cq_depth_24;
u32 int_mod_cq_depth_16;
u32 int_mod_cq_depth_4;
u32 int_mod_cq_depth_1;
-
+static const u8 nes_max_critical_error_count = 100;
#include "nes_cm.h"
static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
- u8 OneG_Mode);
+ struct nes_adapter *nesadapter, u8 OneG_Mode);
static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_hw_aeqe *aeqe);
+static void process_critical_error(struct nes_device *nesdev);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
@@ -222,11 +223,10 @@ static void nes_nic_tune_timer(struct nes_device *nesdev)
}
/* boundary checking */
- if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH)
- shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH;
- else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) {
- shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW;
- }
+ if (shared_timer->timer_in_use > shared_timer->threshold_high)
+ shared_timer->timer_in_use = shared_timer->threshold_high;
+ else if (shared_timer->timer_in_use < shared_timer->threshold_low)
+ shared_timer->timer_in_use = shared_timer->threshold_low;
nesdev->currcq_count = 0;
@@ -292,9 +292,6 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
return NULL;
- if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
- return NULL;
- nes_init_csr_ne020(nesdev, hw_rev, port_count);
max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
@@ -353,6 +350,22 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
+ if (nes_read_eeprom_values(nesdev, nesadapter)) {
+ printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
+ kfree(nesadapter);
+ return NULL;
+ }
+
+ if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
+ OneG_Mode)) {
+ kfree(nesadapter);
+ return NULL;
+ }
+ nes_init_csr_ne020(nesdev, hw_rev, port_count);
+
+ memset(nesadapter->pft_mcast_map, 255,
+ sizeof nesadapter->pft_mcast_map);
+
/* populate the new nesadapter */
nesadapter->devfn = nesdev->pcidev->devfn;
nesadapter->bus_number = nesdev->pcidev->bus->number;
@@ -468,20 +481,25 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
/* setup port configuration */
if (nesadapter->port_count == 1) {
- u32temp = 0x00000000;
+ nesadapter->log_port = 0x00000000;
if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
else
nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
} else {
- if (nesadapter->port_count == 2)
- u32temp = 0x00000044;
- else
- u32temp = 0x000000e4;
+ if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ nesadapter->log_port = 0x000000D8;
+ } else {
+ if (nesadapter->port_count == 2)
+ nesadapter->log_port = 0x00000044;
+ else
+ nesadapter->log_port = 0x000000e4;
+ }
nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
}
- nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp);
+ nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT,
+ nesadapter->log_port);
nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
@@ -706,23 +724,43 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
* nes_init_serdes
*/
static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
- u8 OneG_Mode)
+ struct nes_adapter *nesadapter, u8 OneG_Mode)
{
int i;
u32 u32temp;
+ u32 serdes_common_control;
if (hw_rev != NE020_REV) {
/* init serdes 0 */
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
- if (!OneG_Mode)
+ if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ serdes_common_control = nes_read_indexed(nesdev,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL0);
+ serdes_common_control |= 0x000000100;
+ nes_write_indexed(nesdev,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL0,
+ serdes_common_control);
+ } else if (!OneG_Mode) {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
- if (port_count > 1) {
+ }
+ if (((port_count > 1) &&
+ (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) ||
+ ((port_count > 2) &&
+ (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) {
/* init serdes 1 */
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
- if (!OneG_Mode)
+ if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+ serdes_common_control = nes_read_indexed(nesdev,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ serdes_common_control |= 0x000000100;
+ nes_write_indexed(nesdev,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL1,
+ serdes_common_control);
+ } else if (!OneG_Mode) {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
}
+ }
} else {
/* init serdes 0 */
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
@@ -826,7 +864,8 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
nes_write_indexed(nesdev, 0x00005000, 0x00018000);
/* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
- nes_write_indexed(nesdev, 0x00005004, 0x00020001);
+ nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, (wqm_quanta << 1) |
+ 0x00000001);
nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
@@ -1226,6 +1265,7 @@ int nes_init_phy(struct nes_device *nesdev)
if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
printk(PFX "%s: Programming mdc config for 1G\n", __func__);
tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_config &= 0xFFFFFFE3;
tx_config |= 0x04;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
}
@@ -1291,7 +1331,8 @@ int nes_init_phy(struct nes_device *nesdev)
(nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
/* setup 10G MDIO operation */
tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config |= 0x14;
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x15;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
}
if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
@@ -1315,7 +1356,7 @@ int nes_init_phy(struct nes_device *nesdev)
nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
- nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0000);
+ nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001);
nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
/*
@@ -1759,9 +1800,14 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
*/
void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
{
+ u64 u64temp;
+ dma_addr_t bus_address;
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
struct nes_hw_nic_rq_wqe *nic_rqe;
+ __le16 *wqe_fragment_length;
+ u16 wqe_fragment_index;
u64 wqe_frag;
u32 cqp_head;
unsigned long flags;
@@ -1770,14 +1816,69 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
/* Free remaining NIC receive buffers */
while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
- wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
- wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
+ wqe_frag = (u64)le32_to_cpu(
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+ wqe_frag |= ((u64)le32_to_cpu(
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX]))<<32;
pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
}
+ /* Free remaining NIC transmit buffers */
+ while (nesvnic->nic.sq_head != nesvnic->nic.sq_tail) {
+ nic_sqe = &nesvnic->nic.sq_vbase[nesvnic->nic.sq_tail];
+ wqe_fragment_index = 1;
+ wqe_fragment_length = (__le16 *)
+ &nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+ if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
+ u64temp = (u64)le32_to_cpu(
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
+ wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
+ + wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ if (test_and_clear_bit(nesvnic->nic.sq_tail,
+ nesvnic->nic.first_frag_overflow)) {
+ pci_unmap_single(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(wqe_fragment_length[
+ wqe_fragment_index++]),
+ PCI_DMA_TODEVICE);
+ }
+ for (; wqe_fragment_index < 5; wqe_fragment_index++) {
+ if (wqe_fragment_length[wqe_fragment_index]) {
+ u64temp = le32_to_cpu(
+ nic_sqe->wqe_words[
+ NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
+ wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(
+ nic_sqe->wqe_words[
+ NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+
+ wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ pci_unmap_page(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(
+ wqe_fragment_length[
+ wqe_fragment_index]),
+ PCI_DMA_TODEVICE);
+ } else
+ break;
+ }
+ }
+ if (nesvnic->nic.tx_skb[nesvnic->nic.sq_tail])
+ dev_kfree_skb(
+ nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
+
+ nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
+ & (nesvnic->nic.sq_size - 1);
+ }
+
spin_lock_irqsave(&nesdev->cqp.lock, flags);
/* Destroy NIC QP */
@@ -1894,7 +1995,30 @@ int nes_napi_isr(struct nes_device *nesdev)
}
}
-
+static void process_critical_error(struct nes_device *nesdev)
+{
+ u32 debug_error;
+ u32 nes_idx_debug_error_masks0 = 0;
+ u16 error_module = 0;
+
+ debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
+ printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
+ (u16)debug_error);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
+ 0x01010000 | (debug_error & 0x0000ffff));
+ if (crit_err_count++ > 10)
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
+ error_module = (u16) (debug_error & 0x1F00) >> 8;
+ if (++nesdev->nesadapter->crit_error_count[error_module-1] >=
+ nes_max_critical_error_count) {
+ printk(KERN_ERR PFX "Masking off critical error for module "
+ "0x%02X\n", (u16)error_module);
+ nes_idx_debug_error_masks0 = nes_read_indexed(nesdev,
+ NES_IDX_DEBUG_ERROR_MASKS0);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0,
+ nes_idx_debug_error_masks0 | (1 << error_module));
+ }
+}
/**
* nes_dpc
*/
@@ -1909,7 +2033,6 @@ void nes_dpc(unsigned long param)
u32 timer_stat;
u32 temp_int_stat;
u32 intf_int_stat;
- u32 debug_error;
u32 processed_intf_int = 0;
u16 processed_timer_int = 0;
u16 completion_ints = 0;
@@ -1987,14 +2110,7 @@ void nes_dpc(unsigned long param)
intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
intf_int_stat &= nesdev->intf_int_req;
if (NES_INTF_INT_CRITERR & intf_int_stat) {
- debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
- printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
- (u16)debug_error);
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
- 0x01010000 | (debug_error & 0x0000ffff));
- /* BUG(); */
- if (crit_err_count++ > 10)
- nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
+ process_critical_error(nesdev);
}
if (NES_INTF_INT_PCIERR & intf_int_stat) {
printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
@@ -2258,7 +2374,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
/* read the PHY interrupt status register */
- if (nesadapter->OneG_Mode) {
+ if ((nesadapter->OneG_Mode) &&
+ (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
do {
nes_read_1G_phy_reg(nesdev, 0x1a,
nesadapter->phy_index[mac_index], &phy_data);
@@ -3077,6 +3194,22 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
nes_cm_disconn(nesqp);
break;
/* TODO: additional AEs need to be here */
+ case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent,
+ nesqp->ibqp.qp_context);
+ }
+ nes_cm_disconn(nesqp);
+ break;
default:
nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
async_event_id);
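The process_critical_error() helper added above boils down to a little bit arithmetic: the faulting module number sits in bits 8-12 of the debug error status register, and once a module has faulted too often its bit is set in NES_IDX_DEBUG_ERROR_MASKS0. A standalone sketch of that arithmetic follows; the sample register value is invented for illustration.

/* Standalone model of the module-mask arithmetic in process_critical_error().
 * The bit layout follows the hunk above; the sample register value is
 * invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t debug_error = 0x00000712;  /* hypothetical control/status readout */
    unsigned error_module = (debug_error & 0x1F00) >> 8;
    uint32_t mask_bit = 1u << error_module;

    printf("faulting module %u, mask bit 0x%08x\n",
           error_module, (unsigned)mask_bit);
    printf("ack write to control/status: 0x%08x\n",
           (unsigned)(0x01010000 | (debug_error & 0x0000ffff)));
    return 0;
}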
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 7b81e0ae007..610b9d85959 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -156,6 +156,7 @@ enum indexed_regs {
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
+ NES_IDX_WQM_CONFIG1 = 0x5004,
NES_IDX_CM_CONFIG = 0x5100,
NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
@@ -967,6 +968,7 @@ struct nes_arp_entry {
#define DEFAULT_JUMBO_NES_QL_TARGET 40
#define DEFAULT_JUMBO_NES_QL_HIGH 128
#define NES_NIC_CQ_DOWNWARD_TREND 16
+#define NES_PFT_SIZE 48
struct nes_hw_tune_timer {
/* u16 cq_count; */
@@ -1079,6 +1081,7 @@ struct nes_adapter {
u32 et_rx_max_coalesced_frames_high;
u32 et_rate_sample_interval;
u32 timer_int_limit;
+ u32 wqm_quanta;
/* Adapter base MAC address */
u32 mac_addr_low;
@@ -1094,12 +1097,14 @@ struct nes_adapter {
u16 pd_config_base[4];
u16 link_interrupt_count[4];
+ u8 crit_error_count[32];
/* the phy index for each port */
u8 phy_index[4];
u8 mac_sw_state[4];
u8 mac_link_down[4];
u8 phy_type[4];
+ u8 log_port;
/* PCI information */
unsigned int devfn;
@@ -1113,6 +1118,7 @@ struct nes_adapter {
u8 virtwq;
u8 et_use_adaptive_rx_coalesce;
u8 adapter_fcn_count;
+ u8 pft_mcast_map[NES_PFT_SIZE];
};
struct nes_pbl {
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 1b0938c8777..730358637bb 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -91,6 +91,7 @@ static struct nic_qp_map *nic_qp_mapping_per_function[] = {
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;
+static int nics_per_function = 1;
/**
* nes_netdev_poll
@@ -201,7 +202,8 @@ static int nes_netdev_open(struct net_device *netdev)
nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
" (Addr:%08X) = %08X, HIGH = %08X.\n",
i, nesvnic->qp_nic_index[i],
- NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8),
+ NES_IDX_PERFECT_FILTER_LOW+
+ (nesvnic->qp_nic_index[i] * 8),
macaddr_low,
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
@@ -272,14 +274,18 @@ static int nes_netdev_stop(struct net_device *netdev)
break;
}
- if (first_nesvnic->netdev_open == 0)
+ if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
+ (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
+ PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
+ (0x200*nesdev->mac_index), 0xffffffff);
+ nes_write_indexed(first_nesvnic->nesdev,
+ NES_IDX_MAC_INT_MASK+
+ (0x200*first_nesvnic->nesdev->mac_index),
+ ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
+ NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
+ } else {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
- else if ((first_nesvnic != nesvnic) &&
- (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) != PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
- nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index), 0xffffffff);
- nes_write_indexed(first_nesvnic->nesdev, NES_IDX_MAC_INT_MASK + (0x200 * first_nesvnic->nesdev->mac_index),
- ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
- NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
}
nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
@@ -437,7 +443,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
/* struct udphdr *udph; */
-#define NES_MAX_TSO_FRAGS 18
+#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
/* 64K segment plus overflow on each side */
dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
dma_addr_t bus_address;
@@ -605,6 +611,8 @@ tso_sq_no_longer_full:
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
bus_address);
+ tso_wqe_length += skb_headlen(skb) -
+ original_first_length;
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
@@ -827,6 +835,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
struct dev_mc_list *multicast_addr;
u32 nic_active_bit;
u32 nic_active;
@@ -836,7 +845,12 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
u8 mc_all_on = 0;
u8 mc_index;
int mc_nic_index = -1;
+ u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
+ nics_per_function, 4);
+ u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
+ unsigned long flags;
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
nic_active_bit = 1 << nesvnic->nic_index;
if (netdev->flags & IFF_PROMISC) {
@@ -847,7 +861,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
mc_all_on = 1;
- } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) ||
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
(nesvnic->nic_index > 3)) {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
@@ -866,17 +880,34 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
- netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0,
- (netdev->flags & IFF_ALLMULTI)?1:0);
+ netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
+ !!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
multicast_addr = netdev->mc_list;
- perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80;
- perfect_filter_register_address += nesvnic->nic_index*0x40;
- for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
- while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
+ perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
+ pft_entries_preallocated * 0x8;
+ for (mc_index = 0; mc_index < max_pft_entries_avaiable;
+ mc_index++) {
+ while (multicast_addr && nesvnic->mcrq_mcast_filter &&
+ ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
+ multicast_addr->dmi_addr)) == 0)) {
multicast_addr = multicast_addr->next;
+ }
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
+ while (nesadapter->pft_mcast_map[mc_index] < 16 &&
+ nesadapter->pft_mcast_map[mc_index] !=
+ nesvnic->nic_index &&
+ mc_index < max_pft_entries_avaiable) {
+ nes_debug(NES_DBG_NIC_RX,
+ "mc_index=%d skipping nic_index=%d,\
+ used for=%d \n", mc_index,
+ nesvnic->nic_index,
+ nesadapter->pft_mcast_map[mc_index]);
+ mc_index++;
+ }
+ if (mc_index >= max_pft_entries_avaiable)
+ break;
if (multicast_addr) {
DECLARE_MAC_BUF(mac);
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
@@ -897,15 +928,33 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)(1<<mc_nic_index)) << 16)));
multicast_addr = multicast_addr->next;
+ nesadapter->pft_mcast_map[mc_index] =
+ nesvnic->nic_index;
} else {
nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
perfect_filter_register_address+(mc_index * 8));
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
0);
+ nesadapter->pft_mcast_map[mc_index] = 255;
}
}
+ /* PFT is not large enough */
+ if (multicast_addr && multicast_addr->next) {
+ nic_active = nes_read_indexed(nesdev,
+ NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
+ nic_active);
+ nic_active = nes_read_indexed(nesdev,
+ NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL,
+ nic_active);
+ }
}
+
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}
@@ -918,6 +967,10 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
struct nes_device *nesdev = nesvnic->nesdev;
int ret = 0;
u8 jumbomode = 0;
+ u32 nic_active;
+ u32 nic_active_bit;
+ u32 uc_all_active;
+ u32 mc_all_active;
if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
return -EINVAL;
@@ -931,8 +984,24 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
nes_nic_init_timer_defaults(nesdev, jumbomode);
if (netif_running(netdev)) {
+ nic_active_bit = 1 << nesvnic->nic_index;
+ mc_all_active = nes_read_indexed(nesdev,
+ NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
+ uc_all_active = nes_read_indexed(nesdev,
+ NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
+
nes_netdev_stop(netdev);
nes_netdev_open(netdev);
+
+ nic_active = nes_read_indexed(nesdev,
+ NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= mc_all_active;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
+ nic_active);
+
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active |= uc_all_active;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
return ret;
@@ -1208,10 +1277,12 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
strcpy(drvinfo->driver, DRV_NAME);
strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
- strcpy(drvinfo->fw_version, "TBD");
+ sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
+ nesadapter->firmware_version & 0x000000ff);
strcpy(drvinfo->version, DRV_VERSION);
drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
drvinfo->testinfo_len = 0;
@@ -1587,7 +1658,9 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
- if (nesvnic->nesdev->nesadapter->port_count == 1) {
+ if (nesvnic->nesdev->nesadapter->port_count == 1 &&
+ nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
+
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
@@ -1598,11 +1671,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
}
} else {
- if (nesvnic->nesdev->nesadapter->port_count == 2) {
- nesvnic->qp_nic_index[0] = nesvnic->nic_index;
- nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2;
- nesvnic->qp_nic_index[2] = 0xf;
- nesvnic->qp_nic_index[3] = 0xf;
+ if (nesvnic->nesdev->nesadapter->port_count == 2 ||
+ (nesvnic->nesdev->nesadapter->port_count == 1 &&
+ nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = nesvnic->nic_index
+ + 2;
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = 0xf;
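The multicast rework in nes_netdev_set_multicast_list() above carves the 48-entry perfect filter table (NES_PFT_SIZE) into a preallocated unicast region of max(adapter_fcn_count * nics_per_function, 4) slots followed by shared multicast slots, each entry being an 8-byte low/high register pair. The sketch below models only that addressing; the base register value and the function/NIC counts are assumptions for illustration, not the driver's real constants.

/* Standalone model of the perfect filter table (PFT) addressing: entries
 * are 8 bytes (low word at +0, high word at +4); the first slots are
 * preallocated for unicast and multicast entries start right after them. */
#include <stdio.h>

#define NES_IDX_PERFECT_FILTER_LOW 0x1000   /* placeholder; see nes_hw.h for the real value */
#define NES_PFT_SIZE 48

int main(void)
{
    unsigned adapter_fcn_count = 2;     /* example inputs */
    unsigned nics_per_function = 1;
    unsigned prealloc = adapter_fcn_count * nics_per_function;

    if (prealloc < 4)
        prealloc = 4;

    unsigned mc_slots = NES_PFT_SIZE - prealloc;
    unsigned base = NES_IDX_PERFECT_FILTER_LOW + prealloc * 8;

    for (unsigned mc_index = 0; mc_index < 3; mc_index++)
        printf("mc slot %u -> low reg 0x%04x, high reg 0x%04x\n",
               mc_index, base + mc_index * 8, base + mc_index * 8 + 4);
    printf("%u multicast slots available\n", mc_slots);
    return 0;
}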
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index d79942e8497..932e56fcf77 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1467,7 +1467,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
default:
nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
return ERR_PTR(-EINVAL);
- break;
}
/* update the QP table */
@@ -2498,7 +2497,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr);
return ibmr;
- break;
case IWNES_MEMREG_TYPE_QP:
case IWNES_MEMREG_TYPE_CQ:
nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
@@ -2572,7 +2570,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
nesmr->ibmr.lkey = -1;
nesmr->mode = req.reg_type;
return &nesmr->ibmr;
- break;
}
return ERR_PTR(-ENOSYS);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index b0ffc9abe8c..68ba5c3482e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -268,10 +268,9 @@ struct ipoib_lro {
};
/*
- * Device private locking: tx_lock protects members used in TX fast
- * path (and we use LLTX so upper layers don't do extra locking).
- * lock protects everything else. lock nests inside of tx_lock (ie
- * tx_lock must be acquired first if needed).
+ * Device private locking: network stack tx_lock protects members used
+ * in TX fast path, lock protects everything else. lock nests inside
+ * of tx_lock (ie tx_lock must be acquired first if needed).
*/
struct ipoib_dev_priv {
spinlock_t lock;
@@ -293,6 +292,7 @@ struct ipoib_dev_priv {
struct delayed_work pkey_poll_task;
struct delayed_work mcast_task;
+ struct work_struct carrier_on_task;
struct work_struct flush_light;
struct work_struct flush_normal;
struct work_struct flush_heavy;
@@ -319,7 +319,6 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring;
- spinlock_t tx_lock;
struct ipoib_tx_buf *tx_ring;
unsigned tx_head;
unsigned tx_tail;
@@ -464,6 +463,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);
void ipoib_mcast_join_task(struct work_struct *work);
+void ipoib_mcast_carrier_on_task(struct work_struct *work);
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
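The rest of this series converts every spin_lock_irq(&priv->tx_lock)/spin_lock(&priv->lock) pair into netif_tx_lock_bh()/spin_lock_irqsave() while keeping the nesting documented above: the TX lock is the outer lock, priv->lock nests inside it. A minimal userspace analogue of that ordering rule, using plain pthread mutexes rather than the real primitives:

/* Userspace analogue of the locking rule documented above: the TX lock is
 * taken first, the device-private lock nests inside it, and both are
 * released in reverse order. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock   = PTHREAD_MUTEX_INITIALIZER;  /* outer */
static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER;  /* inner */
static int tx_head, path_count;

static void flush_paths(void)
{
    pthread_mutex_lock(&tx_lock);    /* always take the outer lock first */
    pthread_mutex_lock(&priv_lock);  /* ... then the inner one */
    path_count = 0;
    tx_head++;
    pthread_mutex_unlock(&priv_lock);
    pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
    flush_paths();
    printf("tx_head=%d path_count=%d\n", tx_head, path_count);
    return 0;
}

Holding to one acquisition order is what lets the later hunks drop and retake both locks around blocking calls, such as wait_for_completion() in ipoib_flush_paths() or ipoib_cm_tx_destroy() in the reap path, without risking an inversion.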
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 341ffedafed..7b14c2c3950 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock(dev);
+
++tx->tx_tail;
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(dev) &&
@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock(dev);
}
int ipoib_cm_dev_open(struct net_device *dev)
@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_tx_buf *tx_req;
- unsigned long flags;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1180,12 +1180,12 @@ timeout:
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock_bh(p->dev);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock_bh(p->dev);
}
if (p->qp)
@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
+ unsigned long flags;
int ret;
switch (event->event) {
@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
break;
default:
break;
@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ib_sa_path_rec pathrec;
u32 qpn;
- spin_lock_irqsave(&priv->tx_lock, flags);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
qpn = IPOIB_QPN(neigh->neighbour->ha);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
- spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
ret = ipoib_cm_tx_init(p, qpn, &pathrec);
- spin_lock_irqsave(&priv->tx_lock, flags);
- spin_lock(&priv->lock);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
if (ret) {
neigh = p->neigh;
if (neigh) {
@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
kfree(p);
}
}
- spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
+ struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p;
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
+ struct net_device *dev = priv->dev;
struct sk_buff *skb;
-
+ unsigned long flags;
unsigned mtu = priv->mcast_mtu;
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
dev_kfree_skb_any(skb);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 66cafa20c24..0e748aeeae9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
static void drain_tx_cq(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock(dev);
while (poll_tx(priv))
; /* nothing */
if (netif_queue_stopped(dev))
mod_timer(&priv->poll_timer, jiffies + 1);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock(dev);
}
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
- drain_tx_cq((struct net_device *)dev_ptr);
+ struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
+
+ mod_timer(&priv->poll_timer, jiffies);
}
static inline int post_send(struct ipoib_dev_priv *priv,
@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah, *tah;
LIST_HEAD(remove_list);
+ unsigned long flags;
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list);
ib_destroy_ah(ah->ah);
kfree(ah);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
void ipoib_reap_ah(struct work_struct *work)
@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int i, n;
+
+ /*
+ * We call completion handling routines that expect to be
+ * called from the BH-disabled NAPI poll context, so disable
+ * BHs here too.
+ */
+ local_bh_disable();
+
do {
n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
for (i = 0; i < n; ++i) {
@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
while (poll_tx(priv))
; /* nothing */
+
+ local_bh_enable();
}
int ipoib_ib_dev_stop(struct net_device *dev, int flush)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7e9e218738f..c0ee514396d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path, *tp;
LIST_HEAD(remove_list);
+ unsigned long flags;
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->path_list, &remove_list);
@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
list_for_each_entry_safe(path, tp, &remove_list, list) {
if (path->query)
ib_sa_cancel_query(path->query_id, path->query);
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
wait_for_completion(&path->done);
path_free(dev, path);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
static void path_rec_completion(int status,
@@ -404,7 +406,7 @@ static void path_rec_completion(int status,
struct net_device *dev = path->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah = NULL;
- struct ipoib_ah *old_ah;
+ struct ipoib_ah *old_ah = NULL;
struct ipoib_neigh *neigh, *tn;
struct sk_buff_head skqueue;
struct sk_buff *skb;
@@ -428,12 +430,12 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- old_ah = path->ah;
- path->ah = ah;
-
if (ah) {
path->pathrec = *pathrec;
+ old_ah = path->ah;
+ path->ah = ah;
+
ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
+ unsigned long flags;
neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
if (!neigh) {
@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
return;
}
- /*
- * We can only be called from ipoib_start_xmit, so we're
- * inside tx_lock -- no need to save/restore flags.
- */
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
path = __path_find(dev, skb->dst->neighbour->ha + 4);
if (!path) {
@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
__skb_queue_tail(&neigh->queue, skb);
}
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return;
err_list:
@@ -626,7 +625,7 @@ err_drop:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
+ unsigned long flags;
- /*
- * We can only be called from ipoib_start_xmit, so we're
- * inside tx_lock -- no need to save/restore flags.
- */
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
__skb_queue_tail(&path->queue, skb);
if (path_rec_start(dev, path)) {
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
path_free(dev, path);
return;
} else
@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
dev_kfree_skb_any(skb);
}
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
return;
}
@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
dev_kfree_skb_any(skb);
}
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh;
unsigned long flags;
- if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
-
if (likely(skb->dst && skb->dst->neighbour)) {
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
ipoib_path_lookup(skb, dev);
- goto out;
+ return NETDEV_TX_OK;
}
neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) ||
(neigh->dev != dev))) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
/*
* It's safe to call ipoib_put_ah() inside
* priv->lock here, because we know that
@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_put_ah(neigh->ah);
list_del(&neigh->list);
ipoib_neigh_free(dev, neigh);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
ipoib_path_lookup(skb, dev);
- goto out;
+ return NETDEV_TX_OK;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- goto out;
+ return NETDEV_TX_OK;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
- goto out;
+ return NETDEV_TX_OK;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- goto out;
+ return NETDEV_TX_OK;
}
unicast_arp_send(skb, dev, phdr);
}
}
-out:
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
return NETDEV_TX_OK;
}
@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = (NETIF_F_VLAN_CHALLENGED |
- NETIF_F_LLTX |
NETIF_F_HIGHDMA);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
ipoib_lro_setup(priv);
spin_lock_init(&priv->lock);
- spin_lock_init(&priv->tx_lock);
mutex_init(&priv->vlan_mutex);
@@ -1075,6 +1063,7 @@ static void ipoib_setup(struct net_device *dev)
INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
+ INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ac33c8f3ea8..d9d1223c3fd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh, *tmp;
- unsigned long flags;
int tx_dropped = 0;
ipoib_dbg_mcast(netdev_priv(dev),
"deleting multicast group " IPOIB_GID_FMT "\n",
IPOIB_GID_ARG(mcast->mcmember.mgid));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
/*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
ipoib_neigh_free(dev, neigh);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irq(&priv->lock);
if (mcast->ah)
ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock_bh(dev);
dev->stats.tx_dropped += tx_dropped;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock_bh(dev);
kfree(mcast);
}
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
}
/* actually send any queued packets */
- spin_lock_irq(&priv->tx_lock);
+ netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
- spin_unlock_irq(&priv->tx_lock);
+ netif_tx_unlock_bh(dev);
skb->dev = dev;
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
- spin_lock_irq(&priv->tx_lock);
+ netif_tx_lock_bh(dev);
}
- spin_unlock_irq(&priv->tx_lock);
+ netif_tx_unlock_bh(dev);
return 0;
}
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
{
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
/* We trap for port events ourselves. */
if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
IPOIB_GID_ARG(mcast->mcmember.mgid), status);
/* Flush out any queued packets */
- spin_lock_irq(&priv->tx_lock);
+ netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
- spin_unlock_irq(&priv->tx_lock);
+ netif_tx_unlock_bh(dev);
/* Clear the busy flag so we try again */
status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -366,6 +364,21 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
return ret;
}
+void ipoib_mcast_carrier_on_task(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
+ carrier_on_task);
+
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed.
+ */
+ rtnl_lock();
+ netif_carrier_on(priv->dev);
+ rtnl_unlock();
+}
+
static int ipoib_mcast_join_complete(int status,
struct ib_sa_multicast *multicast)
{
@@ -392,16 +405,12 @@ static int ipoib_mcast_join_complete(int status,
&priv->mcast_task, 0);
mutex_unlock(&mcast_mutex);
- if (mcast == priv->broadcast) {
- /*
- * Take RTNL lock here to avoid racing with
- * ipoib_stop() and turning the carrier back
- * on while a device is being removed.
- */
- rtnl_lock();
- netif_carrier_on(dev);
- rtnl_unlock();
- }
+ /*
+ * Defer carrier on work to ipoib_workqueue to avoid a
+ * deadlock on rtnl_lock here.
+ */
+ if (mcast == priv->broadcast)
+ queue_work(ipoib_workqueue, &priv->carrier_on_task);
return 0;
}
@@ -651,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_mcast *mcast;
+ unsigned long flags;
- /*
- * We can only be called from ipoib_start_xmit, so we're
- * inside tx_lock -- no need to save/restore flags.
- */
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
!priv->broadcast ||
@@ -727,7 +733,7 @@ out:
}
unlock:
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
void ipoib_mcast_dev_flush(struct net_device *dev)
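The new carrier_on_task exists so that the SA multicast completion handler never takes rtnl_lock directly: it only queues work, and the workqueue context takes the lock and turns the carrier on. A rough userspace analogue of that deferral pattern is sketched below; the thread and lock names are illustrative, not the driver's.

/* Userspace analogue of deferring carrier-on work: the completion callback
 * only queues a request; a separate worker takes the "rtnl" lock and flips
 * the carrier flag, so the callback itself never blocks on that lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cond = PTHREAD_COND_INITIALIZER;
static int carrier_on_pending, carrier;

static void mcast_join_complete(void)   /* must not take rtnl here */
{
    pthread_mutex_lock(&q_lock);
    carrier_on_pending = 1;
    pthread_cond_signal(&q_cond);
    pthread_mutex_unlock(&q_lock);
}

static void *carrier_on_worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&q_lock);
    while (!carrier_on_pending)
        pthread_cond_wait(&q_cond, &q_lock);
    carrier_on_pending = 0;
    pthread_mutex_unlock(&q_lock);

    pthread_mutex_lock(&rtnl);          /* safe: worker context, not callback */
    carrier = 1;
    pthread_mutex_unlock(&rtnl);
    return NULL;
}

int main(void)
{
    pthread_t worker;

    pthread_create(&worker, NULL, carrier_on_worker, NULL);
    mcast_join_complete();
    pthread_join(worker, NULL);
    printf("carrier=%d\n", carrier);
    return 0;
}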
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 18f4d7f6ce6..2998a6ac9ae 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -351,8 +351,9 @@ static int report_tp_state(struct bcm5974 *dev, int size)
#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
+#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
-static int bcm5974_wellspring_mode(struct bcm5974 *dev)
+static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
{
char *data = kmalloc(8, GFP_KERNEL);
int retval = 0, size;
@@ -377,7 +378,9 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev)
}
/* apply the mode switch */
- data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE;
+ data[0] = on ?
+ BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
+ BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
/* write configuration */
size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
@@ -392,7 +395,8 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev)
goto out;
}
- dprintk(2, "bcm5974: switched to wellspring mode.\n");
+ dprintk(2, "bcm5974: switched to %s mode.\n",
+ on ? "wellspring" : "normal");
out:
kfree(data);
@@ -481,7 +485,7 @@ exit:
*/
static int bcm5974_start_traffic(struct bcm5974 *dev)
{
- if (bcm5974_wellspring_mode(dev)) {
+ if (bcm5974_wellspring_mode(dev, true)) {
dprintk(1, "bcm5974: mode switch failed\n");
goto error;
}
@@ -504,6 +508,7 @@ static void bcm5974_pause_traffic(struct bcm5974 *dev)
{
usb_kill_urb(dev->tp_urb);
usb_kill_urb(dev->bt_urb);
+ bcm5974_wellspring_mode(dev, false);
}
/*
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index bf44f9d6834..c8b7e8a45c4 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -119,8 +119,8 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
- input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index be0e12144b8..34935155c1c 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev)
{
int ret;
+ /* Map the LED chip select address space */
+ latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
+ if (!latch_address) {
+ ret = -ENOMEM;
+ goto failremap;
+ }
+
+ latch_value = 0xffff;
+ *latch_address = latch_value;
+
ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
if (ret < 0)
goto failwlan;
@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev)
if (ret < 0)
goto failring;
- /* Map the LED chip select address space */
- latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address) {
- ret = -ENOMEM;
- goto failremap;
- }
-
- latch_value = 0xffff;
- *latch_address = latch_value;
-
return ret;
- failremap:
- led_classdev_unregister(&fsg_ring_led);
failring:
led_classdev_unregister(&fsg_sync_led);
failsync:
@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev)
failwan:
led_classdev_unregister(&fsg_wlan_led);
failwlan:
+ iounmap(latch_address);
+ failremap:
return ret;
}
static int fsg_led_remove(struct platform_device *pdev)
{
- iounmap(latch_address);
-
led_classdev_unregister(&fsg_wlan_led);
led_classdev_unregister(&fsg_wan_led);
led_classdev_unregister(&fsg_sata_led);
@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev)
led_classdev_unregister(&fsg_sync_led);
led_classdev_unregister(&fsg_ring_led);
+ iounmap(latch_address);
+
return 0;
}
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 146c0697286..f508729123b 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pca955x_led *pca955x;
- int i;
- int err = -ENODEV;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
+ int i, err;
chip = &pca955x_chipdefs[id->driver_data];
adapter = to_i2c_adapter(client->dev.parent);
@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
+ pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+ if (!pca955x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pca955x);
+
for (i = 0; i < chip->bits; i++) {
- pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL);
- if (!pca955x) {
- err = -ENOMEM;
- goto exit;
- }
+ pca955x[i].chipdef = chip;
+ pca955x[i].client = client;
+ pca955x[i].led_num = i;
- pca955x->chipdef = chip;
- pca955x->client = client;
- pca955x->led_num = i;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
- snprintf(pca955x->name, 32, "pca955x:%s",
- pdata->leds[i].name);
+ snprintf(pca955x[i].name,
+ sizeof(pca955x[i].name), "pca955x:%s",
+ pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
- pca955x->led_cdev.default_trigger =
+ pca955x[i].led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
- snprintf(pca955x->name, 32, "pca955x:%d", i);
+ snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+ "pca955x:%d", i);
}
- spin_lock_init(&pca955x->lock);
- pca955x->led_cdev.name = pca955x->name;
- pca955x->led_cdev.brightness_set =
- pca955x_led_set;
+ spin_lock_init(&pca955x[i].lock);
- /*
- * Client data is a pointer to the _first_ pca955x_led
- * struct
- */
- if (i == 0)
- i2c_set_clientdata(client, pca955x);
+ pca955x[i].led_cdev.name = pca955x[i].name;
+ pca955x[i].led_cdev.brightness_set = pca955x_led_set;
- INIT_WORK(&(pca955x->work), pca955x_led_work);
+ INIT_WORK(&pca955x[i].work, pca955x_led_work);
- led_classdev_register(&client->dev, &(pca955x->led_cdev));
+ err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+ if (err < 0)
+ goto exit;
}
/* Turn off LEDs */
@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client,
pca955x_write_psc(client, 1, 0);
return 0;
+
exit:
+ while (i--) {
+ led_classdev_unregister(&pca955x[i].led_cdev);
+ cancel_work_sync(&pca955x[i].work);
+ }
+
+ kfree(pca955x);
+ i2c_set_clientdata(client, NULL);
+
return err;
}
static int __devexit pca955x_remove(struct i2c_client *client)
{
struct pca955x_led *pca955x = i2c_get_clientdata(client);
- int leds = pca955x->chipdef->bits;
int i;
- for (i = 0; i < leds; i++) {
- led_classdev_unregister(&(pca955x->led_cdev));
- cancel_work_sync(&(pca955x->work));
- kfree(pca955x);
- pca955x = pca955x + 1;
+ for (i = 0; i < pca955x->chipdef->bits; i++) {
+ led_classdev_unregister(&pca955x[i].led_cdev);
+ cancel_work_sync(&pca955x[i].work);
}
+ kfree(pca955x);
+ i2c_set_clientdata(client, NULL);
+
return 0;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc8..682ef9e6acd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
ctx->sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
- atomic_set(&ctx->pending, 1);
}
static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
{
int r;
+ atomic_set(&ctx->pending, 1);
+
while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
ctx->idx_out < ctx->bio_out->bi_vcnt) {
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
/*
* Generate a new unfragmented bio with the given size
* This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
*/
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+ unsigned *out_of_pages)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
return NULL;
clone_init(io, clone);
+ *out_of_pages = 0;
for (i = 0; i < nr_iovecs; i++) {
page = mempool_alloc(cc->page_pool, gfp_mask);
- if (!page)
+ if (!page) {
+ *out_of_pages = 1;
break;
+ }
/*
* if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
}
}
+static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+ struct bio *bio, sector_t sector)
+{
+ struct crypt_config *cc = ti->private;
+ struct dm_crypt_io *io;
+
+ io = mempool_alloc(cc->io_pool, GFP_NOIO);
+ io->target = ti;
+ io->base_bio = bio;
+ io->sector = sector;
+ io->error = 0;
+ atomic_set(&io->pending, 0);
+
+ return io;
+}
+
+static void crypt_inc_pending(struct dm_crypt_io *io)
+{
+ atomic_inc(&io->pending);
+}
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
struct bio *base_bio = io->base_bio;
struct bio *clone;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
/*
* The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
io->error = -EIO;
+ crypt_dec_pending(io);
return;
}
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
if (async)
kcryptd_queue_io(io);
- else {
- atomic_inc(&io->pending);
+ else
generic_make_request(clone);
- }
}
-static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
+static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
+ int crypt_finished;
+ unsigned out_of_pages = 0;
unsigned remaining = io->base_bio->bi_size;
int r;
/*
+ * Prevent io from disappearing until this function completes.
+ */
+ crypt_inc_pending(io);
+ crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
+
+ /*
* The allocated buffers can be smaller than the whole bio,
* so repeat the whole process until all the data can be handled.
*/
while (remaining) {
- clone = crypt_alloc_buffer(io, remaining);
+ clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
if (unlikely(!clone)) {
io->error = -ENOMEM;
- return;
+ break;
}
io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
remaining -= clone->bi_size;
+ crypt_inc_pending(io);
r = crypt_convert(cc, &io->ctx);
+ crypt_finished = atomic_dec_and_test(&io->ctx.pending);
- if (atomic_dec_and_test(&io->ctx.pending)) {
- /* processed, no running async crypto */
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
kcryptd_crypt_write_io_submit(io, r, 0);
- if (unlikely(r < 0))
- return;
- } else
- atomic_inc(&io->pending);
- /* out of memory -> run queues */
- if (unlikely(remaining)) {
- /* wait for async crypto then reinitialize pending */
- wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
- atomic_set(&io->ctx.pending, 1);
- congestion_wait(WRITE, HZ/100);
+ /*
+ * If there was an error, do not try next fragments.
+ * For async, error is processed in async handler.
+ */
+ if (unlikely(r < 0))
+ break;
}
- }
-}
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
-{
- struct crypt_config *cc = io->target->private;
-
- /*
- * Prevent io from disappearing until this function completes.
- */
- atomic_inc(&io->pending);
+ /*
+ * Out of memory -> run queues
+ * But don't wait if split was due to the io size restriction
+ */
+ if (unlikely(out_of_pages))
+ congestion_wait(WRITE, HZ/100);
- crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
- kcryptd_crypt_write_convert_loop(io);
+ if (unlikely(remaining))
+ wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+ }
crypt_dec_pending(io);
}
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
struct crypt_config *cc = io->target->private;
int r = 0;
- atomic_inc(&io->pending);
+ crypt_inc_pending(io);
crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct crypt_config *cc = ti->private;
struct dm_crypt_io *io;
- io = mempool_alloc(cc->io_pool, GFP_NOIO);
- io->target = ti;
- io->base_bio = bio;
- io->sector = bio->bi_sector - ti->begin;
- io->error = 0;
- atomic_set(&io->pending, 0);
+ io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
if (bio_data_dir(io->base_bio) == READ)
kcryptd_queue_io(io);
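The rewritten write path above keeps looping while remaining is non-zero, accepting whatever buffer crypt_alloc_buffer() could build and only backing off with congestion_wait() when out_of_pages reports a genuine allocation failure rather than the bio size cap. A standalone sketch of that allocate-process-retry shape, with made-up sizes and a fake page pool standing in for the mempool:

/* Standalone model of the reworked write loop: the allocator may hand back
 * less than requested and reports via *out_of_pages whether the shortfall
 * was a real allocation failure (back off) or just a size cap (keep going). */
#include <stdio.h>

static unsigned pool = 96;      /* pretend page pool */

static unsigned alloc_buffer(unsigned want, unsigned *out_of_pages)
{
    unsigned got = want > 64 ? 64 : want;   /* 64-sector cap per buffer */

    *out_of_pages = 0;
    if (got > pool) {
        got = pool;
        *out_of_pages = 1;
    }
    pool -= got;
    return got;
}

int main(void)
{
    unsigned remaining = 160, out_of_pages;

    while (remaining) {
        unsigned got = alloc_buffer(remaining, &out_of_pages);

        if (!got)       /* total failure: give up, like io->error = -ENOMEM */
            break;
        remaining -= got;
        printf("processed %u sectors, %u remaining%s\n", got, remaining,
               out_of_pages ? " (out of pages, would congestion_wait)" : "");
        if (out_of_pages)
            pool = 200; /* pretend memory was reclaimed meanwhile */
    }
    return 0;
}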
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 41f408068a7..769ab677f8e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
* Used to keep track of which metadata area the data in
* 'chunk' refers to.
*/
- uint32_t current_area;
+ chunk_t current_area;
/*
* The next free chunk for an exception.
*/
- uint32_t next_free;
+ chunk_t next_free;
/*
* The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
struct dm_io_region where = {
.bdev = ps->snap->cow->bdev,
@@ -209,16 +209,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
}
/*
+ * Convert a metadata area index to a chunk index.
+ */
+static chunk_t area_location(struct pstore *ps, chunk_t area)
+{
+ return 1 + ((ps->exceptions_per_area + 1) * area);
+}
+
+/*
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, uint32_t area, int rw)
+static int area_io(struct pstore *ps, chunk_t area, int rw)
{
int r;
- uint32_t chunk;
+ chunk_t chunk;
- /* convert a metadata area index to a chunk index */
- chunk = 1 + ((ps->exceptions_per_area + 1) * area);
+ chunk = area_location(ps, area);
r = chunk_io(ps, chunk, rw, 0);
if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
return 0;
}
-static int zero_area(struct pstore *ps, uint32_t area)
+static int zero_area(struct pstore *ps, chunk_t area)
{
memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
static int read_exceptions(struct pstore *ps)
{
- uint32_t area;
+ chunk_t area;
int r, full = 1;
/*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
{
struct pstore *ps = get_info(store);
uint32_t stride;
+ chunk_t next_free;
sector_t size = get_dev_size(store->snap->cow->bdev);
/* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
* into account the location of the metadata chunks.
*/
stride = (ps->exceptions_per_area + 1);
- if ((++ps->next_free % stride) == 1)
+ next_free = ++ps->next_free;
+ if (sector_div(next_free, stride) == 1)
ps->next_free++;
atomic_inc(&ps->pending_count);
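area_location() above encodes the COW device layout: chunk 0 holds the header, and each metadata area is one metadata chunk followed by exceptions_per_area data chunks, which is also why persistent_prepare() now skips any next_free that lands on a metadata chunk. A standalone sketch of both calculations, with exceptions_per_area chosen as an example value:

/* Standalone model of the layout implied by area_location():
 * area_location(area) = 1 + (exceptions_per_area + 1) * area. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t chunk_t;

static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
{
    return 1 + (exceptions_per_area + 1) * area;
}

int main(void)
{
    chunk_t epa = 3;    /* example: 3 exceptions per metadata area */

    for (chunk_t area = 0; area < 4; area++)
        printf("area %llu -> metadata chunk %llu\n",
               (unsigned long long)area,
               (unsigned long long)area_location(epa, area));

    /* persistent_prepare(): a freshly advanced next_free that lands on a
     * metadata chunk (remainder 1 modulo the stride) is skipped. */
    chunk_t stride = epa + 1, next_free = 0;
    for (int i = 0; i < 6; i++) {
        ++next_free;
        if (next_free % stride == 1)
            next_free++;        /* skip the metadata chunk */
        printf("allocation %d -> chunk %llu\n", i,
               (unsigned long long)next_free);
    }
    return 0;
}

The patch switches these fields to chunk_t and uses sector_div() for the remainder so the same skip logic stays correct once chunk counts exceed 32 bits.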
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b262c0042de..dca401dc70a 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
old_nl->next = (uint32_t) ((void *) nl -
(void *) old_nl);
disk = dm_disk(hc->md);
- nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+ nl->dev = huge_encode_dev(disk_devt(disk));
nl->next = 0;
strcpy(nl->name, hc->name);
@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (dm_suspended(md))
param->flags |= DM_SUSPEND_FLAG;
- param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor));
+ param->dev = huge_encode_dev(disk_devt(disk));
/*
* Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
*/
param->open_count = dm_open_count(md);
- if (disk->policy)
+ if (get_disk_ro(disk))
param->flags |= DM_READONLY_FLAG;
param->event_nr = dm_get_event_nr(md);
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
unsigned int count = 0;
struct list_head *tmp;
size_t len, needed;
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct dm_target_deps *deps;
deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
deps->count = count;
count = 0;
list_for_each_entry (dd, dm_table_get_devices(table), list)
- deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
+ deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
param->data_size = param->data_start + needed;
}
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 71dd65aa31b..103304c1e3b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -30,9 +30,11 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
+ unsigned is_active; /* Path status */
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
+ struct work_struct deactivate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -63,6 +65,7 @@ struct multipath {
const char *hw_handler_name;
struct work_struct activate_path;
+ struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -111,6 +114,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
/*-----------------------------------------------
@@ -121,8 +125,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath)
- pgpath->path.is_active = 1;
+ if (pgpath) {
+ pgpath->is_active = 1;
+ INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ }
return pgpath;
}
@@ -132,6 +138,14 @@ static void free_pgpath(struct pgpath *pgpath)
kfree(pgpath);
}
+static void deactivate_path(struct work_struct *work)
+{
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, deactivate_path);
+
+ blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
static struct priority_group *alloc_priority_group(void)
{
struct priority_group *pg;
@@ -146,6 +160,7 @@ static struct priority_group *alloc_priority_group(void)
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
+ unsigned long flags;
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
@@ -154,6 +169,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
+ spin_lock_irqsave(&m->lock, flags);
+ if (m->pgpath_to_activate == pgpath)
+ m->pgpath_to_activate = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
@@ -421,6 +440,7 @@ static void process_queued_ios(struct work_struct *work)
__choose_pgpath(m);
pgpath = m->current_pgpath;
+ m->pgpath_to_activate = m->current_pgpath;
if ((pgpath && !m->queue_io) ||
(!pgpath && !m->queue_if_no_path))
@@ -556,12 +576,12 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
/* we need at least a path arg */
if (as->argc < 1) {
ti->error = "no device given";
- return NULL;
+ return ERR_PTR(-EINVAL);
}
p = alloc_pgpath();
if (!p)
- return NULL;
+ return ERR_PTR(-ENOMEM);
r = dm_get_device(ti, shift(as), ti->begin, ti->len,
dm_table_get_mode(ti->table), &p->path.dev);
@@ -589,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
bad:
free_pgpath(p);
- return NULL;
+ return ERR_PTR(r);
}
static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -607,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
if (as->argc < 2) {
as->argc = 0;
- ti->error = "not enough priority group aruments";
- return NULL;
+ ti->error = "not enough priority group arguments";
+ return ERR_PTR(-EINVAL);
}
pg = alloc_priority_group();
if (!pg) {
ti->error = "couldn't allocate priority group";
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
pg->m = m;
@@ -647,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
- if (!pgpath)
+ if (IS_ERR(pgpath)) {
+ r = PTR_ERR(pgpath);
goto bad;
+ }
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -659,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
bad:
free_priority_group(pg, ti);
- return NULL;
+ return ERR_PTR(r);
}
static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -778,8 +800,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct priority_group *pg;
pg = parse_priority_group(&as, m);
- if (!pg) {
- r = -EINVAL;
+ if (IS_ERR(pg)) {
+ r = PTR_ERR(pg);
goto bad;
}
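The dm-mpath hunks above switch parse_path() and parse_priority_group() from returning NULL on any failure to returning ERR_PTR(-errno), so multipath_ctr() can propagate the real error instead of a blanket -EINVAL. A minimal sketch of that encode/decode pattern follows; the simplified macros and the parse_path_demo() helper are illustrative stand-ins, not the kernel's err.h or dm-mpath code:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

struct pgpath { int dummy; };

/* Hypothetical parser: the reason for failure is encoded in the pointer. */
static struct pgpath *parse_path_demo(int argc)
{
	if (argc < 1)
		return ERR_PTR(-EINVAL);	/* bad arguments */
	return ERR_PTR(-ENOMEM);		/* pretend the allocation failed */
}

int main(void)
{
	struct pgpath *p = parse_path_demo(0);

	if (IS_ERR(p))
		printf("parse failed: %ld\n", PTR_ERR(p));	/* prints -22 */
	return 0;
}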
@@ -845,13 +867,13 @@ static int fail_path(struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (!pgpath->path.is_active)
+ if (!pgpath->is_active)
goto out;
DMWARN("Failing path %s.", pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
- pgpath->path.is_active = 0;
+ pgpath->is_active = 0;
pgpath->fail_count++;
m->nr_valid_paths--;
@@ -863,6 +885,7 @@ static int fail_path(struct pgpath *pgpath)
pgpath->path.dev->name, m->nr_valid_paths);
queue_work(kmultipathd, &m->trigger_event);
+ queue_work(kmultipathd, &pgpath->deactivate_path);
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -881,7 +904,7 @@ static int reinstate_path(struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (pgpath->path.is_active)
+ if (pgpath->is_active)
goto out;
if (!pgpath->pg->ps.type->reinstate_path) {
@@ -895,7 +918,7 @@ static int reinstate_path(struct pgpath *pgpath)
if (r)
goto out;
- pgpath->path.is_active = 1;
+ pgpath->is_active = 1;
m->current_pgpath = NULL;
if (!m->nr_valid_paths++ && m->queue_size)
@@ -1093,8 +1116,15 @@ static void activate_path(struct work_struct *work)
int ret;
struct multipath *m =
container_of(work, struct multipath, activate_path);
- struct dm_path *path = &m->current_pgpath->path;
+ struct dm_path *path;
+ unsigned long flags;
+ spin_lock_irqsave(&m->lock, flags);
+ path = m->pgpath_to_activate ? &m->pgpath_to_activate->path : NULL;
+ m->pgpath_to_activate = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
+ if (!path)
+ return;
ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
pg_init_done(path, ret);
}
@@ -1276,7 +1306,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s %s %u ", p->path.dev->name,
- p->path.is_active ? "A" : "F",
+ p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index c198b856a45..e230f719625 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -13,8 +13,6 @@ struct dm_dev;
struct dm_path {
struct dm_dev *dev; /* Read-only */
- unsigned is_active; /* Read-only */
-
void *pscontext; /* For path-selector use */
};
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ff05fe89308..29913e42c4a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,7 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
}
/* hand to kcopyd */
- set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+ if (!errors_handled(ms))
+ set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
+
r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
flags, recovery_complete, reg);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4de90ab3968..b745d8ac625 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
memset(major_minor, 0, sizeof(major_minor));
sprintf(major_minor, "%d:%d",
- bio->bi_bdev->bd_disk->major,
- bio->bi_bdev->bd_disk->first_minor);
+ MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
+ MINOR(disk_devt(bio->bi_bdev->bd_disk)));
/*
* Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 61f44140923..a740a6950f5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
struct list_head *tmp, *next;
list_for_each_safe(tmp, next, devices) {
- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ struct dm_dev_internal *dd =
+ list_entry(tmp, struct dm_dev_internal, list);
kfree(dd);
}
}
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
/*
* See if we've already got a device in the list.
*/
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->bdev->bd_dev == dev)
+ if (dd->dm_dev.bdev->bd_dev == dev)
return dd;
return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
- BUG_ON(d->bdev);
+ BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
blkdev_put(bdev);
else
- d->bdev = bdev;
+ d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
- if (!d->bdev)
+ if (!d->dm_dev.bdev)
return;
- bd_release_from_disk(d->bdev, dm_disk(md));
- blkdev_put(d->bdev);
- d->bdev = NULL;
+ bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev);
+ d->dm_dev.bdev = NULL;
}
/*
* If possible, this checks that an area of the destination device is valid.
*/
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+ sector_t len)
{
- sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
if (!dev_size)
return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
* careful to leave things as they were if we fail to reopen the
* device.
*/
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+ struct mapped_device *md)
{
int r;
- struct dm_dev dd_copy;
- dev_t dev = dd->bdev->bd_dev;
+ struct dm_dev_internal dd_copy;
+ dev_t dev = dd->dm_dev.bdev->bd_dev;
dd_copy = *dd;
- dd->mode |= new_mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode |= new_mode;
+ dd->dm_dev.bdev = NULL;
r = open_dev(dd, dev, md);
if (!r)
close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
{
int r;
dev_t uninitialized_var(dev);
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
unsigned int major, minor;
BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!dd)
return -ENOMEM;
- dd->mode = mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode = mode;
+ dd->dm_dev.bdev = NULL;
if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
- format_dev_t(dd->name, dev);
+ format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->mode != (mode | dd->mode)) {
+ } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
if (!check_device_area(dd, start, len)) {
DMWARN("device %s too small for target", path);
- dm_put_device(ti, dd);
+ dm_put_device(ti, &dd->dm_dev);
return -EINVAL;
}
- *result = dd;
+ *result = &dd->dm_dev;
return 0;
}
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
+ char b[BDEVNAME_SIZE];
+
+ if (unlikely(!q)) {
+ DMWARN("%s: Cannot set limits for nonexistent device %s",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return;
+ }
/*
* Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
/*
* Decrement a devices use count and remove it if necessary.
*/
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
+
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
int r = 0;
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (likely(q))
+ r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ else
+ DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
void dm_table_unplug_all(struct dm_table *t)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
-
- blk_unplug(q);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ char b[BDEVNAME_SIZE];
+
+ if (likely(q))
+ blk_unplug(q);
+ else
+ DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+ dm_device_name(t->md),
+ bdevname(dd->dm_dev.bdev, b));
}
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bca448e1187..327de03a5bd 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
+ int cpu;
io->start_time = jiffies;
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_unlock();
+ dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->bio;
unsigned long duration = jiffies - io->start_time;
- int pending;
+ int pending, cpu;
int rw = bio_data_dir(bio);
- preempt_disable();
- disk_round_stats(dm_disk(md));
- preempt_enable();
- dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);
+ cpu = part_stat_lock();
+ part_round_stats(cpu, &dm_disk(md)->part0);
+ part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
+ part_stat_unlock();
- disk_stat_add(dm_disk(md), ticks[rw], duration);
+ dm_disk(md)->part0.in_flight = pending =
+ atomic_dec_return(&md->pending);
return !pending;
}
@@ -837,12 +839,14 @@ static int dm_merge_bvec(struct request_queue *q,
struct dm_table *map = dm_get_table(md);
struct dm_target *ti;
sector_t max_sectors;
- int max_size;
+ int max_size = 0;
if (unlikely(!map))
- return 0;
+ goto out;
ti = dm_table_find_target(map, bvm->bi_sector);
+ if (!dm_target_is_valid(ti))
+ goto out_table;
/*
* Find maximum amount of I/O that won't need splitting
@@ -861,14 +865,16 @@ static int dm_merge_bvec(struct request_queue *q,
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
+out_table:
+ dm_table_put(map);
+
+out:
/*
* Always allow an entire first page
*/
if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
max_size = biovec->bv_len;
- dm_table_put(map);
-
return max_size;
}
@@ -881,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
int r = -EIO;
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ int cpu;
/*
* There is no use in forwarding any barrier request since we can't
@@ -893,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
down_read(&md->io_lock);
- disk_stat_inc(dm_disk(md), ios[rw]);
- disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
+ part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
+ part_stat_unlock();
/*
* If we're suspended we have to queue
@@ -1142,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
- int minor = md->disk->first_minor;
+ int minor = MINOR(disk_devt(md->disk));
if (md->suspended_bdev) {
unlock_fs(md);
@@ -1178,7 +1187,7 @@ static void event_callback(void *context)
list_splice_init(&md->uevent_list, &uevents);
spin_unlock_irqrestore(&md->uevent_lock, flags);
- dm_send_uevents(&uevents, &md->disk->dev.kobj);
+ dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
@@ -1263,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
md = idr_find(&_minor_idr, minor);
if (md && (md == MINOR_ALLOCED ||
- (dm_disk(md)->first_minor != minor) ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
test_bit(DMF_FREEING, &md->flags))) {
md = NULL;
goto out;
@@ -1314,7 +1323,8 @@ void dm_put(struct mapped_device *md)
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
map = dm_get_table(md);
- idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
+ idr_replace(&_minor_idr, MINOR_ALLOCED,
+ MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
if (!dm_suspended(md)) {
@@ -1634,7 +1644,7 @@ out:
*---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
- kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1e59a0b0a78..cd189da2b2f 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -25,13 +25,10 @@
/*
* List of devices that a metadevice uses and should open/close.
*/
-struct dm_dev {
+struct dm_dev_internal {
struct list_head list;
-
atomic_t count;
- int mode;
- struct block_device *bdev;
- char name[16];
+ struct dm_dev dm_dev;
};
struct dm_table;
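The dm.h hunk above moves the refcount and list linkage into a private dm_dev_internal wrapper while targets continue to see only the embedded struct dm_dev; dm_put_device() in the dm-table.c hunk earlier then recovers the wrapper with container_of(). A standalone sketch of that embed-and-recover pattern (names echo the patch, but this is not the kernel code and the refcount is a plain int for brevity):

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_dev { char name[16]; };		/* public part handed to targets */

struct dm_dev_internal {
	int count;				/* private refcount */
	struct dm_dev dm_dev;			/* embedded public struct */
};

static void put_device_demo(struct dm_dev *d)
{
	struct dm_dev_internal *dd =
		container_of(d, struct dm_dev_internal, dm_dev);

	if (--dd->count == 0)
		printf("last reference to %s dropped\n", d->name);
}

int main(void)
{
	struct dm_dev_internal dd = { .count = 1, .dm_dev = { "8:16" } };

	put_device_demo(&dd.dm_dev);	/* callers only ever hold &dd.dm_dev */
	return 0;
}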
@@ -49,7 +46,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-void dm_table_unplug_all(struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
@@ -93,8 +89,6 @@ void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
-void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
-union map_info *dm_get_mapinfo(struct bio *bio);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b1eebf88c20..b9cbee688fa 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
sector_t block;
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
tmp_dev = which_dev(mddev, bio->bi_sector);
block = bio->bi_sector >> 1;
@@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
* split it.
*/
struct bio_pair *bp;
- bp = bio_split(bio, bio_split_pool,
+ bp = bio_split(bio,
((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
if (linear_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4790c83d78d..0a3a4bdcd4a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
goto fail;
- if (rdev->bdev->bd_part)
- ko = &rdev->bdev->bd_part->dev.kobj;
- else
- ko = &rdev->bdev->bd_disk->dev.kobj;
+ ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
kobject_del(&rdev->kobj);
goto fail;
@@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
disk->queue = mddev->queue;
add_disk(disk);
mddev->gendisk = disk;
- error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
- "%s", "md");
+ error = kobject_init_and_add(&mddev->kobj, &md_ktype,
+ &disk_to_dev(disk)->kobj, "%s", "md");
mutex_unlock(&disks_mutex);
if (error)
printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify(&mddev->kobj, NULL, "array_state");
sysfs_notify(&mddev->kobj, NULL, "sync_action");
sysfs_notify(&mddev->kobj, NULL, "degraded");
- kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
return 0;
}
@@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
- curr_events = disk_stat_read(disk, sectors[0]) +
- disk_stat_read(disk, sectors[1]) -
+ curr_events = part_stat_read(&disk->part0, sectors[0]) +
+ part_stat_read(&disk->part0, sectors[1]) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
@@ -5761,7 +5758,11 @@ void md_do_sync(mddev_t *mddev)
* time 'round when curr_resync == 2
*/
continue;
- prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+ /* We need to wait 'interruptible' so as not to
+ * contribute to the load average, and not to
+ * be caught by 'softlockup'
+ */
+ prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
mddev2->curr_resync >= mddev->curr_resync) {
printk(KERN_INFO "md: delaying %s of %s"
@@ -5769,6 +5770,8 @@ void md_do_sync(mddev_t *mddev)
" share one or more physical units)\n",
desc, mdname(mddev), mdname(mddev2));
mddev_put(mddev2);
+ if (signal_pending(current))
+ flush_signals(current);
schedule();
finish_wait(&resync_wait, &wq);
goto try_again;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c4779ccba1c..8bb8794129b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
const int rw = bio_data_dir(bio);
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 18361063566..53508a8a981 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
sector_t chunk;
sector_t block, rsect;
const int rw = bio_data_dir(bio);
+ int cpu;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
chunk_size = mddev->chunk_size >> 10;
chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+ bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
if (raid0_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (raid0_make_request(q, &bp->bio2))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 03a5ab705c2..b9764429d85 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
const int do_sync = bio_sync(bio);
- int do_barriers;
+ int cpu, do_barriers;
mdk_rdev_t *blocked_rdev;
/*
@@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
bitmap = mddev->bitmap;
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
/*
* make_request() can abort the operation when READA is being
@@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
sbio->bi_size = r1_bio->sectors << 9;
sbio->bi_idx = 0;
sbio->bi_phys_segments = 0;
- sbio->bi_hw_segments = 0;
- sbio->bi_hw_front_size = 0;
- sbio->bi_hw_back_size = 0;
sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
sbio->bi_flags |= 1 << BIO_UPTODATE;
sbio->bi_next = NULL;
@@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_vcnt = 0;
bio->bi_idx = 0;
bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
bio->bi_private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e34cd0e6247..8bdc9bfc288 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
+ int cpu;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
@@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, bio_split_pool,
+ bp = bio_split(bio,
chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
if (make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
@@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
*/
wait_barrier(conf);
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bio));
+ part_stat_unlock();
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
@@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
tbio->bi_size = r10_bio->sectors << 9;
tbio->bi_idx = 0;
tbio->bi_phys_segments = 0;
- tbio->bi_hw_segments = 0;
- tbio->bi_hw_front_size = 0;
- tbio->bi_hw_back_size = 0;
tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
tbio->bi_flags |= 1 << BIO_UPTODATE;
tbio->bi_next = NULL;
@@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_vcnt = 0;
bio->bi_idx = 0;
bio->bi_phys_segments = 0;
- bio->bi_hw_segments = 0;
bio->bi_size = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 224de022e7c..ae16794bef2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -101,6 +101,40 @@
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
#endif
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+ return bio->bi_phys_segments & 0xffff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+ return (bio->bi_phys_segments >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+ --bio->bi_phys_segments;
+ return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+ unsigned short val = raid5_bi_hw_segments(bio);
+
+ --val;
+ bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
+ return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+ bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+}
+
static inline int raid6_next_disk(int disk, int raid_disks)
{
disk++;
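The helpers added above pack two 16-bit counters into the single 32-bit bi_phys_segments word, so the setter must combine the halves with a bitwise OR; a logical '||' there would collapse the whole expression to 0 or 1 and wipe out the low half. A self-contained sketch of the packing, with names echoing the helpers but not taken from the kernel source:

#include <assert.h>
#include <stdio.h>

static unsigned int segs;	/* stands in for bio->bi_phys_segments */

static unsigned int phys(void) { return segs & 0xffff; }		/* active stripes */
static unsigned int hw(void)   { return (segs >> 16) & 0xffff; }	/* processed stripes */

static void set_hw(unsigned int cnt)
{
	/* bitwise OR keeps the low half intact; '||' would reduce this to 0 or 1 */
	segs = phys() | (cnt << 16);
}

int main(void)
{
	segs = 1;		/* biased active count, as in remove_bio_from_retry() */
	set_hw(5);		/* record five processed stripes */
	assert(phys() == 1 && hw() == 5);
	printf("active=%u processed=%u\n", phys(), hw());
	return 0;
}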
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
while (rbi && rbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
- if (--rbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(rbi)) {
rbi->bi_next = return_bi;
return_bi = rbi;
}
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip)
bi->bi_next = *bip;
*bip = bi;
- bi->bi_phys_segments ++;
+ bi->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
spin_unlock(&sh->lock);
@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(bi)) {
bi->bi_next = *return_bi;
*return_bi = bi;
}
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
- if (--wbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(wbi)) {
md_write_end(conf->mddev);
wbi->bi_next = *return_bi;
*return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
copy_data(0, rbi, dev->page, dev->sector);
rbi2 = r5_next_bio(rbi, dev->sector);
spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
+ if (!raid5_dec_bi_phys_segments(rbi)) {
rbi->bi_next = return_bi;
return_bi = rbi;
}
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
if(bi) {
conf->retry_read_aligned_list = bi->bi_next;
bi->bi_next = NULL;
+ /*
+ * this sets the active stripe count to 1 and the processed
+ * stripe count to zero (upper 16 bits)
+ */
bi->bi_phys_segments = 1; /* biased count of active stripes */
- bi->bi_hw_segments = 0; /* count of processed stripes */
}
return bi;
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments ||
- bi->bi_hw_segments > q->max_hw_segments)
+ if (bi->bi_phys_segments > q->max_phys_segments)
return 0;
if (q->merge_bvec_fn)
@@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
- int remaining;
+ int cpu, remaining;
if (unlikely(bio_barrier(bi))) {
bio_endio(bi, -EOPNOTSUPP);
@@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
md_write_start(mddev, bi);
- disk_stat_inc(mddev->gendisk, ios[rw]);
- disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
+ bio_sectors(bi));
+ part_stat_unlock();
if (rw == READ &&
mddev->reshape_position == MaxSector &&
@@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
}
spin_lock_irq(&conf->device_lock);
- remaining = --bi->bi_phys_segments;
+ remaining = raid5_dec_bi_phys_segments(bi);
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) {
@@ -3752,7 +3791,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
sector += STRIPE_SECTORS,
scnt++) {
- if (scnt < raid_bio->bi_hw_segments)
+ if (scnt < raid5_bi_hw_segments(raid_bio))
/* already done this stripe */
continue;
@@ -3760,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
if (!sh) {
/* failed to get a stripe - must wait */
- raid_bio->bi_hw_segments = scnt;
+ raid5_set_bi_hw_segments(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
@@ -3768,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
- raid_bio->bi_hw_segments = scnt;
+ raid5_set_bi_hw_segments(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
@@ -3778,7 +3817,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
handled++;
}
spin_lock_irq(&conf->device_lock);
- remaining = --raid_bio->bi_phys_segments;
+ remaining = raid5_dec_bi_phys_segments(raid_bio);
spin_unlock_irq(&conf->device_lock);
if (remaining == 0)
bio_endio(raid_bio, 0);
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index 216025cf5d4..2c5b6282b56 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -10,6 +10,7 @@
#include "dvb_frontend.h"
#define XC2028_DEFAULT_FIRMWARE "xc3028-v27.fw"
+#define XC3028L_DEFAULT_FIRMWARE "xc3028L-v36.fw"
/* Demodulator IF (kHz) */
#define XC3028_FE_DEFAULT 0 /* Don't load SCODE */
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index 4eed783f4bc..a127a4175c4 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -491,6 +491,7 @@ static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
.demod_address = 0x53,
.invert = 1,
.repeated_start_workaround = 1,
+ .serial_mpeg = 1,
};
static struct itd1000_config skystar2_rev2_7_itd1000_config = {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 069d847ba88..0c733c66a44 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -364,15 +364,16 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
+ unsigned long flags;
int ret;
if (dmxdevfilter->buffer.error) {
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
- spin_lock(&dmxdevfilter->dev->lock);
+ spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
if (dmxdevfilter->state != DMXDEV_STATE_GO) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
return 0;
}
del_timer(&dmxdevfilter->timer);
@@ -391,7 +392,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
}
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
@@ -403,11 +404,12 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
+ unsigned long flags;
int ret;
- spin_lock(&dmxdevfilter->dev->lock);
+ spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
return 0;
}
@@ -417,7 +419,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
else
buffer = &dmxdevfilter->dev->dvr_buffer;
if (buffer->error) {
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
wake_up(&buffer->queue);
return 0;
}
@@ -428,7 +430,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
dvb_ringbuffer_flush(buffer);
buffer->error = ret;
}
- spin_unlock(&dmxdevfilter->dev->lock);
+ spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
wake_up(&buffer->queue);
return 0;
}
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index e2eca0b1fe7..a2c1fd5d2f6 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -399,7 +399,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
- spin_lock(&demux->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&demux->lock, flags);
while (count--) {
if (buf[0] == 0x47)
@@ -407,16 +409,17 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
buf += 188;
}
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
{
+ unsigned long flags;
int p = 0, i, j;
- spin_lock(&demux->lock);
+ spin_lock_irqsave(&demux->lock, flags);
if (demux->tsbufp) {
i = demux->tsbufp;
@@ -449,17 +452,18 @@ void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
}
bailout:
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
{
+ unsigned long flags;
int p = 0, i, j;
u8 tmppack[188];
- spin_lock(&demux->lock);
+ spin_lock_irqsave(&demux->lock, flags);
if (demux->tsbufp) {
i = demux->tsbufp;
@@ -500,7 +504,7 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
}
bailout:
- spin_unlock(&demux->lock);
+ spin_unlock_irqrestore(&demux->lock, flags);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 747d3fa2e5e..2e9fd2893ed 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -59,7 +59,7 @@ struct s5h1420_state {
* it does not support repeated-start, workaround: write addr-1
* and then read
*/
- u8 shadow[255];
+ u8 shadow[256];
};
static u32 s5h1420_getsymbolrate(struct s5h1420_state* state);
@@ -94,8 +94,11 @@ static u8 s5h1420_readreg(struct s5h1420_state *state, u8 reg)
if (ret != 3)
return ret;
} else {
- ret = i2c_transfer(state->i2c, &msg[1], 2);
- if (ret != 2)
+ ret = i2c_transfer(state->i2c, &msg[1], 1);
+ if (ret != 1)
+ return ret;
+ ret = i2c_transfer(state->i2c, &msg[2], 1);
+ if (ret != 1)
return ret;
}
@@ -823,7 +826,7 @@ static int s5h1420_init (struct dvb_frontend* fe)
struct s5h1420_state* state = fe->demodulator_priv;
/* disable power down and do reset */
- state->CON_1_val = 0x10;
+ state->CON_1_val = state->config->serial_mpeg << 4;
s5h1420_writereg(state, 0x02, state->CON_1_val);
msleep(10);
s5h1420_reset(state);
diff --git a/drivers/media/dvb/frontends/s5h1420.h b/drivers/media/dvb/frontends/s5h1420.h
index 4c913f142bc..ff308136d86 100644
--- a/drivers/media/dvb/frontends/s5h1420.h
+++ b/drivers/media/dvb/frontends/s5h1420.h
@@ -32,10 +32,12 @@ struct s5h1420_config
u8 demod_address;
/* does the signal require inversion? */
- u8 invert : 1;
+ u8 invert:1;
- u8 repeated_start_workaround : 1;
- u8 cdclk_polarity : 1; /* 1 == falling edge, 0 == rising edge */
+ u8 repeated_start_workaround:1;
+ u8 cdclk_polarity:1; /* 1 == falling edge, 0 == rising edge */
+
+ u8 serial_mpeg:1;
};
#if defined(CONFIG_DVB_S5H1420) || (defined(CONFIG_DVB_S5H1420_MODULE) && defined(MODULE))
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index cc5efb643f3..9da260fe3fd 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -40,6 +40,8 @@ struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B },
{ USB_DEVICE(0x2040, 0x5500),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0x5510),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0x5580),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0x5590),
@@ -87,7 +89,7 @@ static struct sms_board sms_boards[] = {
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw",
},
[SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = {
- .name = "Hauppauge WinTV-Nova-T-MiniStick",
+ .name = "Hauppauge WinTV MiniStick",
.type = SMS_NOVA_B0,
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-01.fw",
},
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 6ae4cc860ef..933eaef41ea 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3431,7 +3431,7 @@ static int radio_open(struct inode *inode, struct file *file)
dprintk("bttv: open minor=%d\n",minor);
for (i = 0; i < bttv_num; i++) {
- if (bttvs[i].radio_dev->minor == minor) {
+ if (bttvs[i].radio_dev && bttvs[i].radio_dev->minor == minor) {
btv = &bttvs[i];
break;
}
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index c149b7d712e..5405c30dbb0 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index a4574740350..a8a199047cb 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -632,7 +632,7 @@ int cpia2_usb_transfer_cmd(struct camera_data *cam,
static int submit_urbs(struct camera_data *cam)
{
struct urb *urb;
- int fx, err, i;
+ int fx, err, i, j;
for(i=0; i<NUM_SBUF; ++i) {
if (cam->sbuf[i].data)
@@ -657,6 +657,9 @@ static int submit_urbs(struct camera_data *cam)
}
urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
if (!urb) {
+ ERR("%s: usb_alloc_urb error!\n", __func__);
+ for (j = 0; j < i; j++)
+ usb_free_urb(cam->sbuf[j].urb);
return -ENOMEM;
}
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 8fe5f38c4d7..3cb9734ec07 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -163,7 +163,7 @@ static const struct cx18_card cx18_card_h900 = {
},
.audio_inputs = {
{ CX18_CARD_INPUT_AUD_TUNER,
- CX18_AV_AUDIO8, 0 },
+ CX18_AV_AUDIO5, 0 },
{ CX18_CARD_INPUT_LINE_IN1,
CX18_AV_AUDIO_SERIAL1, 0 },
},
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 3c006103c1e..ac3292d7646 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -117,10 +117,10 @@ static void em28xx_audio_isocirq(struct urb *urb)
if (oldptr + length >= runtime->buffer_size) {
unsigned int cnt =
- runtime->buffer_size - oldptr - 1;
+ runtime->buffer_size - oldptr;
memcpy(runtime->dma_area + oldptr * stride, cp,
cnt * stride);
- memcpy(runtime->dma_area, cp + cnt,
+ memcpy(runtime->dma_area, cp + cnt * stride,
length * stride - cnt * stride);
} else {
memcpy(runtime->dma_area + oldptr * stride, cp,
@@ -161,8 +161,14 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
memset(dev->adev->transfer_buffer[i], 0x80, sb_size);
urb = usb_alloc_urb(EM28XX_NUM_AUDIO_PACKETS, GFP_ATOMIC);
- if (!urb)
+ if (!urb) {
+ em28xx_errdev("usb_alloc_urb failed!\n");
+ for (j = 0; j < i; j++) {
+ usb_free_urb(dev->adev->urb[j]);
+ kfree(dev->adev->transfer_buffer[j]);
+ }
return -ENOMEM;
+ }
urb->dev = dev->udev;
urb->context = dev;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 452da70e719..de943cf6c16 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -93,28 +93,6 @@ struct em28xx_board em28xx_boards[] = {
.amux = 0,
} },
},
- [EM2800_BOARD_KWORLD_USB2800] = {
- .name = "Kworld USB2800",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
- .is_em2800 = 1,
- .vchannels = 3,
- .tuner_type = TUNER_PHILIPS_FCV1236D,
- .tda9887_conf = TDA9887_PRESENT,
- .decoder = EM28XX_SAA7113,
- .input = { {
- .type = EM28XX_VMUX_TELEVISION,
- .vmux = SAA7115_COMPOSITE2,
- .amux = 0,
- }, {
- .type = EM28XX_VMUX_COMPOSITE1,
- .vmux = SAA7115_COMPOSITE0,
- .amux = 1,
- }, {
- .type = EM28XX_VMUX_SVIDEO,
- .vmux = SAA7115_SVIDEO3,
- .amux = 1,
- } },
- },
[EM2820_BOARD_KWORLD_PVRTV2800RF] = {
.name = "Kworld PVR TV 2800 RF",
.is_em2800 = 0,
@@ -599,7 +577,7 @@ struct em28xx_board em28xx_boards[] = {
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
- .amux = 1,
+ .amux = 3,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
@@ -952,22 +930,23 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2880_BOARD_KWORLD_DVB_310U] = {
.name = "KWorld DVB-T 310U",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.vchannels = 3,
.tuner_type = TUNER_XC2028,
+ .has_dvb = 1,
+ .mts_firmware = 1,
.decoder = EM28XX_TVP5150,
.input = { {
.type = EM28XX_VMUX_TELEVISION,
.vmux = TVP5150_COMPOSITE0,
- .amux = 0,
+ .amux = EM28XX_AMUX_VIDEO,
}, {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
- .amux = 1,
- }, {
+ .amux = EM28XX_AMUX_AC97_LINE_IN,
+ }, { /* S-video has not been tested yet */
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
- .amux = 1,
+ .amux = EM28XX_AMUX_AC97_LINE_IN,
} },
},
[EM2881_BOARD_DNT_DA2_HYBRID] = {
@@ -1282,6 +1261,7 @@ static struct em28xx_reg_seq em2882_terratec_hybrid_xs_digital[] = {
static struct em28xx_hash_table em28xx_eeprom_hash [] = {
/* P/N: SA 60002070465 Tuner: TVF7533-MF */
{0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF},
+ {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
};
/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1552,9 +1532,12 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
/* djh - Not sure which demod we need here */
ctl->demod = XC3028_FE_DEFAULT;
break;
+ case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
+ ctl->demod = XC3028_FE_DEFAULT;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
case EM2880_BOARD_PINNACLE_PCTV_HD_PRO:
- case EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600:
/* FIXME: Better to specify the needed IF */
ctl->demod = XC3028_FE_DEFAULT;
break;
@@ -1764,6 +1747,20 @@ void em28xx_card_setup(struct em28xx *dev)
break;
case EM2820_BOARD_UNKNOWN:
case EM2800_BOARD_UNKNOWN:
+ /*
+ * The K-WORLD DVB-T 310U is detected as an MSI Digivox AD.
+ *
+ * This occurs because they share identical USB vendor and
+ * product IDs.
+ *
+ * What we do here is look up the EEPROM hash of the K-WORLD;
+ * if it is found, we conclude the device is not a DIGIVOX and
+ * reset it to the K-WORLD instead.
+ *
+ * This solution is only valid if the two boards do not share
+ * EEPROM hash identities, which has not yet been determined.
+ */
+ case EM2880_BOARD_MSI_DIGIVOX_AD:
if (!em28xx_hint_board(dev))
em28xx_set_model(dev);
break;
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 4b992bc0083..d2b1a1a5268 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -452,6 +452,15 @@ static int dvb_init(struct em28xx *dev)
goto out_free;
}
break;
+ case EM2880_BOARD_KWORLD_DVB_310U:
+ dvb->frontend = dvb_attach(zl10353_attach,
+ &em28xx_zl10353_with_xc3028,
+ &dev->i2c_adap);
+ if (attach_xc3028(0x61, dev) < 0) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n",
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 7be69284da0..ac95c55887d 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -459,6 +459,7 @@ static int create_urbs(struct gspca_dev *gspca_dev,
urb = usb_alloc_urb(npkt, GFP_KERNEL);
if (!urb) {
err("usb_alloc_urb failed");
+ destroy_urbs(gspca_dev);
return -ENOMEM;
}
urb->transfer_buffer = usb_buffer_alloc(gspca_dev->dev,
@@ -468,8 +469,8 @@ static int create_urbs(struct gspca_dev *gspca_dev,
if (urb->transfer_buffer == NULL) {
usb_free_urb(urb);
- destroy_urbs(gspca_dev);
err("usb_buffer_urb failed");
+ destroy_urbs(gspca_dev);
return -ENOMEM;
}
gspca_dev->urb[n] = urb;
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index d4be5184328..ba865b7f1ed 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -1063,6 +1063,7 @@ static __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302},
{USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302},
{USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302},
+ {USB_DEVICE(0x093a, 0x262a), .driver_info = SENSOR_PAC7302},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 5dd78c6766e..12b81ae526b 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -232,7 +232,7 @@ static struct ctrl sd_ctrls[] = {
static struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 160,
- .sizeimage = 160 * 120 * 5 / 4,
+ .sizeimage = 160 * 120,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2 | MODE_RAW},
{160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
@@ -264,7 +264,7 @@ static struct v4l2_pix_format sif_mode[] = {
.priv = 1 | MODE_REDUCED_SIF},
{176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 176,
- .sizeimage = 176 * 144 * 5 / 4,
+ .sizeimage = 176 * 144,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1 | MODE_RAW},
{176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index d75b1d20b31..572b0f363b6 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -707,6 +707,7 @@ static void i2c_w8(struct gspca_dev *gspca_dev,
0x08, 0, /* value, index */
gspca_dev->usb_buf, 8,
500);
+ msleep(2);
}
/* read 5 bytes in gspca_dev->usb_buf */
@@ -976,13 +977,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
case BRIDGE_SN9C105:
if (regF1 != 0x11)
return -ENODEV;
- reg_w(gspca_dev, 0x02, regGpio, 2);
+ reg_w(gspca_dev, 0x01, regGpio, 2);
break;
case BRIDGE_SN9C120:
if (regF1 != 0x12)
return -ENODEV;
regGpio[1] = 0x70;
- reg_w(gspca_dev, 0x02, regGpio, 2);
+ reg_w(gspca_dev, 0x01, regGpio, 2);
break;
default:
/* case BRIDGE_SN9C110: */
@@ -1183,7 +1184,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
static const __u8 CE_ov76xx[] =
- { 0x32, 0xdd, 0x32, 0xdd }; /* OV7630/48 */
+ { 0x32, 0xdd, 0x32, 0xdd };
sn9c1xx = sn_tb[(int) sd->sensor];
configure_gpio(gspca_dev, sn9c1xx);
@@ -1223,8 +1224,15 @@ static void sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def);
for (i = 0; i < 8; i++)
reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
+ switch (sd->sensor) {
+ case SENSOR_OV7660:
+ reg_w1(gspca_dev, 0x9a, 0x05);
+ break;
+ default:
reg_w1(gspca_dev, 0x9a, 0x08);
reg_w1(gspca_dev, 0x99, 0x59);
+ break;
+ }
mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
if (mode)
@@ -1275,8 +1283,8 @@ static void sd_start(struct gspca_dev *gspca_dev)
/* reg1 = 0x44; */
/* reg1 = 0x46; (done) */
} else {
- reg17 = 0x22; /* 640 MCKSIZE */
- reg1 = 0x06;
+ reg17 = 0xa2; /* 640 */
+ reg1 = 0x44;
}
break;
}
@@ -1285,6 +1293,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
switch (sd->sensor) {
case SENSOR_OV7630:
case SENSOR_OV7648:
+ case SENSOR_OV7660:
reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
break;
default:
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index cfbc9ebc5c5..95fcfcb9e31 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -225,7 +225,7 @@ static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01));
do {
reg_r(gspca_dev, 0x8803, 1);
- if (!gspca_dev->usb_buf)
+ if (!gspca_dev->usb_buf[0])
break;
} while (--retry);
if (retry == 0)
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 8d7c27e6ac7..d61ef727e0c 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6576,8 +6576,8 @@ static int setlightfreq(struct gspca_dev *gspca_dev)
cs2102_60HZ, cs2102_60HZScale},
/* SENSOR_CS2102K 1 */
{cs2102_NoFliker, cs2102_NoFlikerScale,
- cs2102_50HZ, cs2102_50HZScale,
- cs2102_60HZ, cs2102_60HZScale},
+ NULL, NULL, /* currently disabled */
+ NULL, NULL},
/* SENSOR_GC0305 2 */
{gc0305_NoFliker, gc0305_NoFliker,
gc0305_50HZ, gc0305_50HZ,
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 3d3c48db45d..c6852402c5e 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -3591,7 +3591,7 @@ static int
ov51x_init_isoc(struct usb_ov511 *ov)
{
struct urb *urb;
- int fx, err, n, size;
+ int fx, err, n, i, size;
PDEBUG(3, "*** Initializing capture ***");
@@ -3662,6 +3662,8 @@ ov51x_init_isoc(struct usb_ov511 *ov)
urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
if (!urb) {
err("init isoc: usb_alloc_urb ret. NULL");
+ for (i = 0; i < n; i++)
+ usb_free_urb(ov->sbuf[i].urb);
return -ENOMEM;
}
ov->sbuf[n].urb = urb;
@@ -5651,7 +5653,7 @@ static ssize_t show_exposure(struct device *cd,
if (!ov->dev)
return -ENODEV;
sensor_get_exposure(ov, &exp);
- return sprintf(buf, "%d\n", exp >> 8);
+ return sprintf(buf, "%d\n", exp);
}
static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 88e17516843..cbe2a341785 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -489,6 +489,8 @@ static const struct pvr2_device_desc pvr2_device_751xx = {
struct usb_device_id pvr2_device_table[] = {
{ USB_DEVICE(0x2040, 0x2900),
.driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
+ { USB_DEVICE(0x2040, 0x2950), /* Logically identical to 2900 */
+ .driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
{ USB_DEVICE(0x2040, 0x2400),
.driver_info = (kernel_ulong_t)&pvr2_device_24xxx},
{ USB_DEVICE(0x1164, 0x0622),
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b1d09d8e2b8..92b83feae36 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -669,7 +669,7 @@ static void s2255_fillbuff(struct s2255_dev *dev, struct s2255_buffer *buf,
(unsigned long)vbuf, pos);
/* tell v4l buffer was filled */
- buf->vb.field_count++;
+ buf->vb.field_count = dev->frame_count[chn] * 2;
do_gettimeofday(&ts);
buf->vb.ts = ts;
buf->vb.state = VIDEOBUF_DONE;
@@ -1268,6 +1268,7 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
dev->last_frame[chn] = -1;
dev->bad_payload[chn] = 0;
dev->cur_frame[chn] = 0;
+ dev->frame_count[chn] = 0;
for (j = 0; j < SYS_FRAMES; j++) {
dev->buffer[chn].frame[j].ulState = 0;
dev->buffer[chn].frame[j].cur_size = 0;
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index 9929d20320b..26194a0ce92 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -488,10 +488,12 @@ static int snd_card_saa7134_hw_params(struct snd_pcm_substream * substream,
period_size = params_period_bytes(hw_params);
periods = params_periods(hw_params);
- snd_assert(period_size >= 0x100 && period_size <= 0x10000,
- return -EINVAL);
- snd_assert(periods >= 4, return -EINVAL);
- snd_assert(period_size * periods <= 1024 * 1024, return -EINVAL);
+ if (period_size < 0x100 || period_size > 0x10000)
+ return -EINVAL;
+ if (periods < 4)
+ return -EINVAL;
+ if (period_size * periods > 1024 * 1024)
+ return -EINVAL;
dev = saa7134->dev;
@@ -942,7 +944,8 @@ static int snd_card_saa7134_new_mixer(snd_card_saa7134_t * chip)
unsigned int idx;
int err;
- snd_assert(chip != NULL, return -EINVAL);
+ if (snd_BUG_ON(!chip))
+ return -EINVAL;
strcpy(card->mixername, "SAA7134 Mixer");
for (idx = 0; idx < ARRAY_SIZE(snd_saa7134_controls); idx++) {
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 6ef3e5297de..feab12aa2c7 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -592,7 +592,7 @@ int uvc_query_v4l2_ctrl(struct uvc_video_device *video,
if (ctrl == NULL)
return -EINVAL;
- data = kmalloc(8, GFP_KERNEL);
+ data = kmalloc(ctrl->info->size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 168baabe465..11edf79f57b 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -911,7 +911,6 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam)
for (i = 0; i < W9968CF_URBS; i++) {
urb = usb_alloc_urb(W9968CF_ISO_PACKETS, GFP_KERNEL);
- cam->urb[i] = urb;
if (!urb) {
for (j = 0; j < i; j++)
usb_free_urb(cam->urb[j]);
@@ -919,6 +918,7 @@ static int w9968cf_start_transfer(struct w9968cf_device* cam)
return -ENOMEM;
}
+ cam->urb[i] = urb;
urb->dev = udev;
urb->context = (void*)cam;
urb->pipe = usb_rcvisocpipe(udev, 1);
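
The w9968cf change above reorders URB allocation so the pointer is stored in cam->urb[i] only after the NULL check; the error path then frees exactly the entries that were recorded. Below is a minimal, self-contained sketch of that allocate-check-then-store pattern, using malloc/free as stand-ins for usb_alloc_urb/usb_free_urb; the bufs array and start_transfer name are hypothetical, not driver code.

/*
 * Sketch of the allocation-ordering fix in w9968cf_start_transfer():
 * record a buffer in the array only after the allocation has been
 * checked, so the error path frees exactly the entries that were
 * successfully stored.
 */
#include <stdlib.h>

#define NBUFS 4

static void *bufs[NBUFS];

static int start_transfer(void)
{
	int i, j;

	for (i = 0; i < NBUFS; i++) {
		void *p = malloc(64);
		if (!p) {
			for (j = 0; j < i; j++)	/* free only what was stored */
				free(bufs[j]);
			return -1;
		}
		bufs[i] = p;			/* store only after the check */
	}
	return 0;
}

int main(void)
{
	int i;

	if (start_transfer())
		return 1;
	for (i = 0; i < NBUFS; i++)
		free(bufs[i]);
	return 0;
}

Recording the pointer only after the check keeps the cleanup loop's invariant simple: every slot below i holds a valid allocation.
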
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index 95c79ad8048..54ac3fe26ec 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -274,10 +274,8 @@ static int wm8739_probe(struct i2c_client *client,
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct wm8739_state), GFP_KERNEL);
- if (state == NULL) {
- kfree(client);
+ if (state == NULL)
return -ENOMEM;
- }
state->vol_l = 0x17; /* 0dB */
state->vol_r = 0x17; /* 0dB */
state->muted = 0;
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index d842a7cb99d..3282be73029 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -988,7 +988,7 @@ zoran_open_init_params (struct zoran *zr)
zr->v4l_grab_seq = 0;
zr->v4l_settings.width = 192;
zr->v4l_settings.height = 144;
- zr->v4l_settings.format = &zoran_formats[4]; /* YUY2 - YUV-4:2:2 packed */
+ zr->v4l_settings.format = &zoran_formats[7]; /* YUY2 - YUV-4:2:2 packed */
zr->v4l_settings.bytesperline =
zr->v4l_settings.width *
((zr->v4l_settings.format->depth + 7) / 8);
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index ec6f59674b1..2dab9eea4de 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -134,7 +134,7 @@ const struct zoran_format zoran_formats[] = {
}, {
.name = "16-bit RGB BE",
ZFMT(-1,
- V4L2_PIX_FMT_RGB565, V4L2_COLORSPACE_SRGB),
+ V4L2_PIX_FMT_RGB565X, V4L2_COLORSPACE_SRGB),
.depth = 16,
.flags = ZORAN_FORMAT_CAPTURE |
ZORAN_FORMAT_OVERLAY,
@@ -2737,7 +2737,8 @@ zoran_do_ioctl (struct inode *inode,
fh->v4l_settings.format->fourcc;
fmt->fmt.pix.colorspace =
fh->v4l_settings.format->colorspace;
- fmt->fmt.pix.bytesperline = 0;
+ fmt->fmt.pix.bytesperline =
+ fh->v4l_settings.bytesperline;
if (BUZ_MAX_HEIGHT <
(fh->v4l_settings.height * 2))
fmt->fmt.pix.field =
@@ -2833,13 +2834,6 @@ zoran_do_ioctl (struct inode *inode,
fmt->fmt.pix.pixelformat,
(char *) &printformat);
- if (fmt->fmt.pix.bytesperline > 0) {
- dprintk(5,
- KERN_ERR "%s: bpl not supported\n",
- ZR_DEVNAME(zr));
- return -EINVAL;
- }
-
/* we can be requested to do JPEG/raw playback/capture */
if (!
(fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
@@ -2923,6 +2917,7 @@ zoran_do_ioctl (struct inode *inode,
fh->jpg_buffers.buffer_size =
zoran_v4l2_calc_bufsize(&fh->
jpg_settings);
+ fmt->fmt.pix.bytesperline = 0;
fmt->fmt.pix.sizeimage =
fh->jpg_buffers.buffer_size;
@@ -2979,6 +2974,8 @@ zoran_do_ioctl (struct inode *inode,
/* tell the user the
* results/missing stuff */
+ fmt->fmt.pix.bytesperline =
+ fh->v4l_settings.bytesperline;
fmt->fmt.pix.sizeimage =
fh->v4l_settings.height *
fh->v4l_settings.bytesperline;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index d2d2318dafa..6e291bf8237 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -197,7 +197,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
static int mspro_block_disk_release(struct gendisk *disk)
{
struct mspro_block_data *msb = disk->private_data;
- int disk_id = disk->first_minor >> MSPRO_BLOCK_PART_SHIFT;
+ int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
mutex_lock(&mspro_block_disk_lock);
@@ -828,7 +828,7 @@ static void mspro_block_submit_req(struct request_queue *q)
if (msb->eject) {
while ((req = elv_next_request(q)) != NULL)
- end_queued_request(req, -ENODEV);
+ __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
return;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 10c44d3fe01..68dc8d9eb24 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -21,7 +21,7 @@ config MFD_SM501
config MFD_SM501_GPIO
bool "Export GPIO via GPIO layer"
- depends on MFD_SM501 && HAVE_GPIO_LIB
+ depends on MFD_SM501 && GPIOLIB
---help---
This option uses the gpio library layer to export the 64 GPIO
lines on the SM501. The platform data is used to supply the
@@ -29,7 +29,7 @@ config MFD_SM501_GPIO
config MFD_ASIC3
bool "Support for Compaq ASIC3"
- depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM
+ depends on GENERIC_HARDIRQS && GPIOLIB && ARM
---help---
This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones)
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index bc2a807f210..ba5aa200827 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -312,7 +312,6 @@ static int __init asic3_irq_probe(struct platform_device *pdev)
struct asic3 *asic = platform_get_drvdata(pdev);
unsigned long clksel = 0;
unsigned int irq, irq_base;
- int map_size;
int ret;
ret = platform_get_irq(pdev, 0);
@@ -534,6 +533,7 @@ static int __init asic3_probe(struct platform_device *pdev)
struct asic3 *asic;
struct resource *mem;
unsigned long clksel;
+ int map_size;
int ret = 0;
asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index facdb9893c8..1ee8501e90f 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -450,12 +450,14 @@ static int eeepc_get_fan_pwm(void)
int value = 0;
read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
+ value = value * 255 / 100;
return (value);
}
static void eeepc_set_fan_pwm(int value)
{
- value = SENSORS_LIMIT(value, 0, 100);
+ value = SENSORS_LIMIT(value, 0, 255);
+ value = value * 100 / 255;
ec_write(EEEPC_EC_SC02, value);
}
@@ -520,15 +522,23 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
-EEEPC_CREATE_SENSOR_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
eeepc_get_fan_pwm, eeepc_set_fan_pwm);
EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "eeepc\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_fan1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
NULL
};
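
The eeepc-laptop hunk above converts between the EC's 0-100 fan duty range and the 0-255 range expected by the hwmon pwm1 interface: reads scale up, writes clamp and then scale down. A small, self-contained sketch of those conversions follows; clamp_int, ec_to_pwm and pwm_to_ec are illustrative names, and the driver's SENSORS_LIMIT is approximated by the local clamp.

/*
 * Sketch of the unit conversion introduced in eeepc-laptop: the EC
 * stores fan duty as 0-100, the hwmon pwm1 interface uses 0-255.
 */
#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static int ec_to_pwm(int ec_value)	/* 0-100 -> 0-255 */
{
	return ec_value * 255 / 100;
}

static int pwm_to_ec(int pwm_value)	/* 0-255 -> 0-100 */
{
	return clamp_int(pwm_value, 0, 255) * 100 / 255;
}

int main(void)
{
	printf("ec 50   -> pwm %d\n", ec_to_pwm(50));	/* 127 */
	printf("pwm 128 -> ec %d\n", pwm_to_ec(128));	/* 50  */
	printf("pwm 999 -> ec %d\n", pwm_to_ec(999));	/* clamped to 100 */
	return 0;
}

Because the conversions use integer division, a round trip can lose a little precision (pwm 128 -> ec 50 -> pwm 127), which is acceptable for fan control.
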
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 6986f392624..efacee0404a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,6 +29,7 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -83,7 +84,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
- int devidx = md->disk->first_minor >> MMC_SHIFT;
+ int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
__clear_bit(devidx, dev_use);
put_disk(md->disk);
@@ -532,6 +533,8 @@ static int mmc_blk_probe(struct mmc_card *card)
struct mmc_blk_data *md;
int err;
+ char cap_str[10];
+
/*
* Check that the card supports the command class(es) we need.
*/
@@ -546,10 +549,11 @@ static int mmc_blk_probe(struct mmc_card *card)
if (err)
goto out;
- printk(KERN_INFO "%s: %s %s %lluKiB %s\n",
+ string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ printk(KERN_INFO "%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- (unsigned long long)(get_capacity(md->disk) >> 1),
- md->read_only ? "(ro)" : "");
+ cap_str, md->read_only ? "(ro)" : "");
mmc_set_drvdata(card, md);
add_disk(md->disk);
@@ -615,14 +619,19 @@ static struct mmc_driver mmc_driver = {
static int __init mmc_blk_init(void)
{
- int res = -ENOMEM;
+ int res;
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
goto out;
- return mmc_register_driver(&mmc_driver);
+ res = mmc_register_driver(&mmc_driver);
+ if (res)
+ goto out2;
+ return 0;
+ out2:
+ unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out:
return res;
}
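
The mmc_blk_init() change above adds proper unwinding: if mmc_register_driver() fails after register_blkdev() succeeded, the block major is unregistered before the error is returned. The sketch below shows the same goto-based unwind idiom in plain, compilable C; register_a, register_b and unregister_a are hypothetical stand-ins, not kernel APIs.

/*
 * Minimal sketch of the unwind pattern used in mmc_blk_init(): each
 * successful setup step gets a matching rollback on the failure path,
 * executed in reverse order of acquisition.
 */
#include <stdio.h>

static int register_a(void) { return 0; }	/* 0 on success */
static int register_b(void) { return -1; }	/* simulate failure */
static void unregister_a(void) { puts("a rolled back"); }

static int init(void)
{
	int res;

	res = register_a();
	if (res)
		goto out;

	res = register_b();
	if (res)
		goto out_unreg_a;

	return 0;

out_unreg_a:
	unregister_a();		/* undo the earlier step before failing */
out:
	return res;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}

Each successful step owns a label on the failure path, so partially completed initialization never leaks a registration.
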
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index f26b01d811a..b92b172074e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1040,7 +1040,7 @@ static const struct mmc_test_case mmc_test_cases[] = {
};
-static struct mutex mmc_test_lock;
+static DEFINE_MUTEX(mmc_test_lock);
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
@@ -1171,8 +1171,6 @@ static int mmc_test_probe(struct mmc_card *card)
if ((card->type != MMC_TYPE_MMC) && (card->type != MMC_TYPE_SD))
return -ENODEV;
- mutex_init(&mmc_test_lock);
-
ret = device_create_file(&card->dev, &dev_attr_test);
if (ret)
return ret;
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0bd06f5bd62..00008967ef7 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -195,7 +195,9 @@ static int atmci_regs_show(struct seq_file *s, void *v)
/* Grab a more or less consistent snapshot */
spin_lock_irq(&host->mmc->lock);
+ clk_enable(host->mck);
memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
+ clk_disable(host->mck);
spin_unlock_irq(&host->mmc->lock);
seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
@@ -216,6 +218,8 @@ static int atmci_regs_show(struct seq_file *s, void *v)
atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+ kfree(buf);
+
return 0;
}
@@ -237,7 +241,6 @@ static void atmci_init_debugfs(struct atmel_mci *host)
struct mmc_host *mmc;
struct dentry *root;
struct dentry *node;
- struct resource *res;
mmc = host->mmc;
root = mmc->debugfs_root;
@@ -251,9 +254,6 @@ static void atmci_init_debugfs(struct atmel_mci *host)
if (!node)
goto err;
- res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
- node->d_inode->i_size = res->end - res->start + 1;
-
node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops);
if (!node)
goto err;
@@ -426,8 +426,6 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data)
host->sg = NULL;
host->data = data;
- mci_writel(host, BLKR, MCI_BCNT(data->blocks)
- | MCI_BLKLEN(data->blksz));
dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n",
MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
@@ -483,6 +481,10 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (data->blocks > 1 && data->blksz & 3)
goto fail;
atmci_set_timeout(host, data);
+
+ /* Must set block count/size before sending command */
+ mci_writel(host, BLKR, MCI_BCNT(data->blocks)
+ | MCI_BLKLEN(data->blksz));
}
iflags = MCI_CMDRDY;
@@ -1059,6 +1061,10 @@ static int __init atmci_probe(struct platform_device *pdev)
host->present = !gpio_get_value(host->detect_pin);
}
}
+
+ if (!gpio_is_valid(host->detect_pin))
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
if (gpio_is_valid(host->wp_pin)) {
if (gpio_request(host->wp_pin, "mmc_wp")) {
dev_dbg(&mmc->class_dev, "no WP pin available\n");
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9e647a06054..ba2b4240a86 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -159,10 +159,10 @@ static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
#define STATUS_TO_TEXT(a) \
do { \
if (status & TMIO_STAT_##a) \
- printf(#a); \
+ printk(#a); \
} while (0)
-void debug_status(u32 status)
+void pr_debug_status(u32 status)
{
printk(KERN_DEBUG "status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE);
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f34f20c7891..9bf581c4f74 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1005,6 +1005,29 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
return ftl_write((void *)dev, buf, block, 1);
}
+static int ftl_discardsect(struct mtd_blktrans_dev *dev,
+ unsigned long sector, unsigned nr_sects)
+{
+ partition_t *part = (void *)dev;
+ uint32_t bsize = 1 << part->header.EraseUnitSize;
+
+ DEBUG(1, "FTL erase sector %ld for %d sectors\n",
+ sector, nr_sects);
+
+ while (nr_sects) {
+ uint32_t old_addr = part->VirtualBlockMap[sector];
+ if (old_addr != 0xffffffff) {
+ part->VirtualBlockMap[sector] = 0xffffffff;
+ part->EUNInfo[old_addr/bsize].Deleted++;
+ if (set_bam_entry(part, old_addr, 0))
+ return -EIO;
+ }
+ nr_sects--;
+ sector++;
+ }
+
+ return 0;
+}
/*====================================================================*/
static void ftl_freepart(partition_t *part)
@@ -1069,6 +1092,7 @@ static struct mtd_blktrans_ops ftl_tr = {
.blksize = SECTOR_SIZE,
.readsect = ftl_readsect,
.writesect = ftl_writesect,
+ .discard = ftl_discardsect,
.getgeo = ftl_getgeo,
.add_mtd = ftl_add_mtd,
.remove_dev = ftl_remove_dev,
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 9ff007c4962..681d5aca2af 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,6 +32,14 @@ struct mtd_blkcore_priv {
spinlock_t queue_lock;
};
+static int blktrans_discard_request(struct request_queue *q,
+ struct request *req)
+{
+ req->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ req->cmd[0] = REQ_LB_OP_DISCARD;
+ return 0;
+}
+
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
@@ -44,6 +52,10 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
buf = req->buffer;
+ if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+ req->cmd[0] == REQ_LB_OP_DISCARD)
+ return !tr->discard(dev, block, nsect);
+
if (!blk_fs_request(req))
return 0;
@@ -367,6 +379,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkcore_priv->rq->queuedata = tr;
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+ if (tr->discard)
+ blk_queue_set_discard(tr->blkcore_priv->rq,
+ blktrans_discard_request);
+
tr->blkshift = ffs(tr->blksize) - 1;
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
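
Taken together, the ftl.c and mtd_blkdevs.c hunks wire discard support through the block layer: a prep callback tags discard requests, do_blktrans_request() recognizes the tag ahead of the normal filesystem-request path, and routes it to the translation layer's optional discard hook (ftl_discardsect() above). The following is a rough, self-contained sketch of that tag-and-dispatch shape; the struct and enum names are invented for illustration and are not the kernel's request types.

/*
 * Sketch of the dispatch added in do_blktrans_request(): requests
 * carry a type/opcode tag, and tagged discard requests are routed to
 * an optional per-backend discard hook before the read/write path.
 */
#include <stdio.h>

enum req_op { REQ_OP_RW, REQ_OP_DISCARD_TAG };

struct fake_request {
	enum req_op op;
	unsigned long sector;
	unsigned int nr_sects;
};

struct backend {
	int (*discard)(unsigned long sector, unsigned int nr_sects); /* optional */
};

static int ftl_like_discard(unsigned long sector, unsigned int nr_sects)
{
	printf("discard %u sectors at %lu\n", nr_sects, sector);
	return 0;
}

static int handle_request(struct backend *be, struct fake_request *req)
{
	if (req->op == REQ_OP_DISCARD_TAG && be->discard)
		return be->discard(req->sector, req->nr_sects);

	/* normal read/write handling would go here */
	return 0;
}

int main(void)
{
	struct backend be = { .discard = ftl_like_discard };
	struct fake_request req = { REQ_OP_DISCARD_TAG, 128, 8 };

	return handle_request(&be, &req);
}

Keeping the hook optional (checked for NULL) mirrors how register_mtd_blktrans() only enables discard when tr->discard is provided.
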
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index c3c579f98ed..dfacd31f7ed 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6597,7 +6597,7 @@ struct flash_spec {
struct bnx2_irq {
irq_handler_t handler;
- u16 vector;
+ unsigned int vector;
u8 requested;
char name[16];
};
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 453115acaad..5cf78d612c4 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2738,9 +2738,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->flags |= wol_magic;
/* ack any pending wake events, disable PME */
- err = pci_enable_wake(pdev, 0, 0);
- if (err)
- DPRINTK(PROBE, ERR, "Error clearing wake event\n");
+ pci_pme_active(pdev, false);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 9d6edf3e73f..d04eef53571 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -144,6 +144,8 @@ static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
/* IGP cable length table */
static const
@@ -168,6 +170,8 @@ u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
104, 109, 114, 118, 121, 124};
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
/******************************************************************************
* Set the phy type member in the hw struct.
*
@@ -4904,6 +4908,15 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
*****************************************************************************/
s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
@@ -5236,6 +5249,16 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
*****************************************************************************/
s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
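
The e1000_hw.c hunks above serialize EEPROM access by turning the public e1000_read_eeprom()/e1000_write_eeprom() entry points into thin wrappers that take a single driver-wide spinlock and delegate to static e1000_do_* helpers holding the original bodies. Below is a userspace analogue of that wrapper pattern, assuming a pthread mutex in place of the kernel spinlock; do_read_eeprom() is a dummy body, not the real routine.

/*
 * Userspace analogue of the e1000 EEPROM locking change: the public
 * entry point only takes a lock and delegates to a static "do_" helper
 * containing the original, unlocked body.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t eeprom_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_read_eeprom(int offset, int words, unsigned short *data)
{
	int i;

	/* the original, unlocked body would live here */
	for (i = 0; i < words; i++)
		data[i] = (unsigned short)(offset + i);
	return 0;
}

int read_eeprom(int offset, int words, unsigned short *data)
{
	int ret;

	pthread_mutex_lock(&eeprom_lock);	/* serialize all callers */
	ret = do_read_eeprom(offset, words, data);
	pthread_mutex_unlock(&eeprom_lock);
	return ret;
}

int main(void)
{
	unsigned short buf[4];

	read_eeprom(0x10, 4, buf);
	printf("%d %d %d %d\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}

The point of the split is that every caller goes through the locked wrapper, while the unlocked helper stays private to the file.
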
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ac4e506b4f8..5ea6b60fa37 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -257,7 +257,6 @@ struct e1000_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- spinlock_t stats_lock; /* prevent concurrent stats updates */
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -284,6 +283,8 @@ struct e1000_adapter {
unsigned long led_status;
unsigned int flags;
+ struct work_struct downshift_task;
+ struct work_struct update_phy_task;
};
struct e1000_info {
@@ -305,6 +306,7 @@ struct e1000_info {
#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
+#define FLAG_READ_ONLY_NVM (1 << 8)
#define FLAG_IS_ICH (1 << 9)
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
@@ -385,6 +387,7 @@ extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e21c9e0f373..33a3ff17b5d 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -432,6 +432,10 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[11] = er32(TIDV);
regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
+
+ /* ethtool doesn't use anything past this point, so the remaining
+ * code is likely legacy code kept only for applications that may or
+ * may not still exist */
if (hw->phy.type == e1000_phy_m88) {
e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (u32)phy_data; /* cable length */
@@ -447,7 +451,7 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[22] = adapter->phy_stats.receive_errors;
regs_buff[23] = regs_buff[13]; /* mdix mode */
}
- regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
+ regs_buff[21] = 0; /* was idle_errors */
e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
@@ -529,6 +533,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
return -EFAULT;
+ if (adapter->flags & FLAG_READ_ONLY_NVM)
+ return -EINVAL;
+
max_len = hw->nvm.word_size * 2;
first_word = eeprom->offset >> 1;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e38452a738..bcd2bc477af 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -58,6 +58,7 @@
#define ICH_FLASH_HSFCTL 0x0006
#define ICH_FLASH_FADDR 0x0008
#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
@@ -150,6 +151,19 @@ union ich8_hws_flash_regacc {
u16 regval;
};
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+ struct ich8_pr {
+ u32 base:13; /* 0:12 Protected Range Base */
+ u32 reserved1:2; /* 13:14 Reserved */
+ u32 rpe:1; /* 15 Read Protection Enable */
+ u32 limit:13; /* 16:28 Protected Range Limit */
+ u32 reserved2:2; /* 29:30 Reserved */
+ u32 wpe:1; /* 31 Write Protection Enable */
+ } range;
+ u32 regval;
+};
+
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
@@ -366,6 +380,9 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
return 0;
}
+static DEFINE_MUTEX(nvm_mutex);
+static pid_t nvm_owner = -1;
+
/**
* e1000_acquire_swflag_ich8lan - Acquire software control flag
* @hw: pointer to the HW structure
@@ -379,6 +396,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
u32 extcnf_ctrl;
u32 timeout = PHY_CFG_TIMEOUT;
+ might_sleep();
+
+ if (!mutex_trylock(&nvm_mutex)) {
+ WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
+ nvm_owner);
+ mutex_lock(&nvm_mutex);
+ }
+ nvm_owner = current->pid;
+
while (timeout) {
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
@@ -393,6 +419,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
if (!timeout) {
hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
+ nvm_owner = -1;
+ mutex_unlock(&nvm_mutex);
return -E1000_ERR_CONFIG;
}
@@ -414,6 +442,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ nvm_owner = -1;
+ mutex_unlock(&nvm_mutex);
}
/**
@@ -1284,6 +1315,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
* programming failed.
*/
if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
hw_dbg(hw, "Flash commit failed.\n");
e1000_release_swflag_ich8lan(hw);
return ret_val;
@@ -1374,6 +1406,49 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ * @hw: pointer to the HW structure
+ *
+ * To prevent malicious write/erase of the NVM, set it to be read-only
+ * so that the hardware ignores all write/erase cycles of the NVM via
+ * the flash control registers. The shadow-ram copy of the NVM will
+ * still be updated; however, any updates to this copy will not persist
+ * across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+ union ich8_flash_protected_range pr0;
+ union ich8_hws_flash_status hsfsts;
+ u32 gfpreg;
+ s32 ret_val;
+
+ ret_val = e1000_acquire_swflag_ich8lan(hw);
+ if (ret_val)
+ return;
+
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+ /* Write-protect GbE Sector of NVM */
+ pr0.regval = er32flash(ICH_FLASH_PR0);
+ pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+ pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+ pr0.range.wpe = true;
+ ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+ /*
+ * Lock down a subset of GbE Flash Control Registers, e.g.
+ * PR0 to prevent the write-protection from being lifted.
+ * Once FLOCKDN is set, the registers protected by it cannot
+ * be written until FLOCKDN is cleared by a hardware reset.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ hsfsts.hsf_status.flockdn = true;
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+ e1000_release_swflag_ich8lan(hw);
+}
+
+/**
* e1000_write_flash_data_ich8lan - Writes bytes to the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the byte/word to read.
@@ -1720,6 +1795,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
+ /* release the swflag because it is not reset by hardware reset */
+ e1000_release_swflag_ich8lan(hw);
+
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
/*
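
Earlier in this ich8lan.c diff, e1000_acquire_swflag_ich8lan() gains a contention diagnostic: it first tries the NVM mutex, warns with the recorded owner pid if the trylock fails, and only then blocks on the lock. Here is a loose userspace analogue of that try-then-warn-then-block pattern using pthreads; the owner bookkeeping is illustrative only.

/*
 * Userspace analogue of the contention diagnostic added to
 * e1000_acquire_swflag_ich8lan(): try the lock, report the current
 * owner if that fails, then fall back to a blocking acquire.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t nvm_mutex = PTHREAD_MUTEX_INITIALIZER;
static pid_t nvm_owner = -1;

static void acquire_nvm(void)
{
	if (pthread_mutex_trylock(&nvm_mutex) != 0) {
		fprintf(stderr, "nvm lock contended, owned by pid %d\n",
			(int)nvm_owner);
		pthread_mutex_lock(&nvm_mutex);	/* fall back to blocking */
	}
	nvm_owner = getpid();
}

static void release_nvm(void)
{
	nvm_owner = -1;
	pthread_mutex_unlock(&nvm_mutex);
}

int main(void)
{
	acquire_nvm();
	release_nvm();
	return 0;
}

The trylock is purely diagnostic; correctness still comes from the blocking lock that follows it.
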
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d266510c8a9..b81c4237b5d 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -47,7 +47,7 @@
#include "e1000.h"
-#define DRV_VERSION "0.3.3.3-k2"
+#define DRV_VERSION "0.3.3.3-k6"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -1115,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, downshift_task);
+
+ e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
/**
* e1000_intr_msi - Interrupt Handler
* @irq: interrupt number
@@ -1139,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
- e1000e_gig_downshift_workaround_ich8lan(hw);
+ schedule_work(&adapter->downshift_task);
/*
* 80003ES2LAN workaround-- For packet buffer work-around on
@@ -1205,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
- e1000e_gig_downshift_workaround_ich8lan(hw);
+ schedule_work(&adapter->downshift_task);
/*
* 80003ES2LAN workaround--
@@ -2592,8 +2600,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
- spin_lock_init(&adapter->stats_lock);
-
set_bit(__E1000_DOWN, &adapter->state);
return 0;
@@ -2912,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
return 0;
}
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because reading the phy requires
+ * acquiring a semaphore, which may msleep while waiting, and we
+ * cannot msleep in a timer context.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter, update_phy_task);
+ e1000_get_phy_info(&adapter->hw);
+}
+
/*
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
@@ -2919,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
- e1000_get_phy_info(&adapter->hw);
+ schedule_work(&adapter->update_phy_task);
}
/**
@@ -2930,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned long irq_flags;
- u16 phy_tmp;
-
-#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
/*
* Prevent stats update while adapter is being reset, or if the pci
@@ -2944,14 +2961,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if (pci_channel_offline(pdev))
return;
- spin_lock_irqsave(&adapter->stats_lock, irq_flags);
-
- /*
- * these counters are modified from e1000_adjust_tbi_stats,
- * called from the interrupt context, so they must only
- * be written while holding adapter->stats_lock
- */
-
adapter->stats.crcerrs += er32(CRCERRS);
adapter->stats.gprc += er32(GPRC);
adapter->stats.gorc += er32(GORCL);
@@ -3022,21 +3031,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
/* Tx Dropped needs to be maintained elsewhere */
- /* Phy Stats */
- if (hw->phy.media_type == e1000_media_type_copper) {
- if ((adapter->link_speed == SPEED_1000) &&
- (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
- phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
- adapter->phy_stats.idle_errors += phy_tmp;
- }
- }
-
/* Management Stats */
adapter->stats.mgptc += er32(MGTPTC);
adapter->stats.mgprc += er32(MGTPRC);
adapter->stats.mgpdc += er32(MGTPDC);
-
- spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
}
/**
@@ -3048,10 +3046,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
int ret_val;
- unsigned long irq_flags;
-
-
- spin_lock_irqsave(&adapter->stats_lock, irq_flags);
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
@@ -3082,8 +3076,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
phy->stat1000 = 0;
phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
}
-
- spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
}
static void e1000_print_link_info(struct e1000_adapter *adapter)
@@ -4467,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->bd_number = cards_found++;
+ e1000e_check_options(adapter);
+
/* setup adapter struct */
err = e1000_sw_init(adapter);
if (err)
@@ -4482,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
goto err_hw_init;
+ if ((adapter->flags & FLAG_IS_ICH) &&
+ (adapter->flags & FLAG_READ_ONLY_NVM))
+ e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
hw->mac.ops.get_bus_info(&adapter->hw);
adapter->hw.phy.autoneg_wait_to_complete = 0;
@@ -4572,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
-
- e1000e_check_options(adapter);
+ INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+ INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index ed912e023a7..d91dbf7ba43 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -133,6 +133,15 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -388,4 +397,25 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
opt.def);
}
}
+ { /* Write-protect NVM */
+ const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "Write-protect NVM",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (adapter->flags & FLAG_IS_ICH) {
+ if (num_WriteProtectNVM > bd) {
+ unsigned int write_protect_nvm = WriteProtectNVM[bd];
+ e1000_validate_option(&write_protect_nvm, &opt,
+ adapter);
+ if (write_protect_nvm)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ } else {
+ if (opt.def)
+ adapter->flags |= FLAG_READ_ONLY_NVM;
+ }
+ }
+ }
}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0b6ecef9a84..eeb55ed2152 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5643,6 +5643,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+ printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
}
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -5890,14 +5891,12 @@ static void nv_restore_phy(struct net_device *dev)
}
}
-static void __devexit nv_remove(struct pci_dev *pci_dev)
+static void nv_restore_mac_addr(struct pci_dev *pci_dev)
{
struct net_device *dev = pci_get_drvdata(pci_dev);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- unregister_netdev(dev);
-
/* special op: write back the misordered MAC address - otherwise
* the next nv_probe would see a wrong address.
*/
@@ -5905,6 +5904,15 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
writel(np->orig_mac[1], base + NvRegMacAddrB);
writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
base + NvRegTransmitPoll);
+}
+
+static void __devexit nv_remove(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+
+ unregister_netdev(dev);
+
+ nv_restore_mac_addr(pci_dev);
/* restore any phy related changes */
nv_restore_phy(dev);
@@ -5975,6 +5983,8 @@ static void nv_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
nv_close(dev);
+ nv_restore_mac_addr(pdev);
+
pci_disable_device(pdev);
if (system_state == SYSTEM_POWER_OFF) {
if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 8239939554b..fbbd3e660c2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -139,7 +139,7 @@ static int __init do_hpp_probe(struct net_device *dev)
#ifndef MODULE
struct net_device * __init hp_plus_probe(int unit)
{
- struct net_device *dev = alloc_ei_netdev();
+ struct net_device *dev = alloc_eip_netdev();
int err;
if (!dev)
@@ -284,7 +284,7 @@ hpp_open(struct net_device *dev)
int option_reg;
int retval;
- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
+ if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
return retval;
}
@@ -302,7 +302,7 @@ hpp_open(struct net_device *dev)
/* Select the operational page. */
outw(Perf_Page, ioaddr + HP_PAGING);
- ei_open(dev);
+ eip_open(dev);
return 0;
}
@@ -313,7 +313,7 @@ hpp_close(struct net_device *dev)
int option_reg = inw(ioaddr + HPP_OPTION);
free_irq(dev->irq, dev);
- ei_close(dev);
+ eip_close(dev);
outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
ioaddr + HPP_OPTION);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 62071d9c4a5..d1dd5b48dbd 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -67,11 +67,10 @@ struct mlx4_mpt_entry {
#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
#define MLX4_MPT_FLAG_REGION (1 << 8)
-#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 26)
+#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24)
-#define MLX4_MTT_FLAG_PRESENT 1
-
#define MLX4_MPT_STATUS_SW 0xF0
#define MLX4_MPT_STATUS_HW 0x00
@@ -348,7 +347,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
/* fast register MR in free state */
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
- mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG);
+ mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+ MLX4_MPT_PD_FLAG_RAE);
+ mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
+ MLX4_MTT_ENTRY_PER_SEG);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index f6c45288d0e..87e37bc3914 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -294,8 +294,6 @@ static int ath_stop(struct ath_softc *sc)
* hardware is gone (invalid).
*/
- if (!sc->sc_invalid)
- ath9k_hw_set_interrupts(ah, 0);
ath_draintxq(sc, false);
if (!sc->sc_invalid) {
ath_stoprecv(sc);
@@ -797,6 +795,12 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
sc->sc_imask |= ATH9K_INT_CST;
+ /* Note: We disable MIB interrupts for now because we don't yet
+ * handle ANI processing; otherwise an interrupt storm sets in after
+ * about 7 hours of use, making the system unusable due to huge
+ * latency. Once ANI processing is included, we can re-enable this
+ * interrupt. */
+#if 0
/*
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
@@ -804,6 +808,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
if (ath9k_hw_phycounters(ah) &&
((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS)))
sc->sc_imask |= ATH9K_INT_MIB;
+#endif
/*
* Some hardware processes the TIM IE and fires an
* interrupt when the TIM bit is set. For hardware
@@ -1336,6 +1341,8 @@ void ath_deinit(struct ath_softc *sc)
DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
ath_stop(sc);
if (!sc->sc_invalid)
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 4ee695b76b8..2f84093331e 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -974,7 +974,6 @@ struct ath_softc {
u32 sc_keymax; /* size of key cache */
DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
u8 sc_splitmic; /* split TKIP MIC keys */
- int sc_keytype;
/* RX */
struct list_head sc_rxbuf;
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 99badf1404c..acebdf1d20a 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -206,8 +206,6 @@ static int ath_key_config(struct ath_softc *sc,
if (!ret)
return -EIO;
- if (mac)
- sc->sc_keytype = hk.kv_type;
return 0;
}
@@ -778,7 +776,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
case DISABLE_KEY:
ath_key_delete(sc, key);
clear_bit(key->keyidx, sc->sc_keymap);
- sc->sc_keytype = ATH9K_CIPHER_CLR;
break;
default:
ret = -EINVAL;
@@ -1414,10 +1411,17 @@ static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
+ enum ath9k_int status;
- if (pdev->irq)
+ if (pdev->irq) {
+ ath9k_hw_set_interrupts(sc->sc_ah, 0);
+ /* clear the ISR */
+ ath9k_hw_getisr(sc->sc_ah, &status);
+ sc->sc_invalid = 1;
free_irq(pdev->irq, sc);
+ }
ath_detach(sc);
+
pci_iounmap(pdev, sc->mem);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 550129f717e..8b332e11a65 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -315,11 +315,11 @@ static int ath_tx_prepare(struct ath_softc *sc,
txctl->keyix = tx_info->control.hw_key->hw_key_idx;
txctl->frmlen += tx_info->control.icv_len;
- if (sc->sc_keytype == ATH9K_CIPHER_WEP)
+ if (tx_info->control.hw_key->alg == ALG_WEP)
txctl->keytype = ATH9K_KEY_TYPE_WEP;
- else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
+ else if (tx_info->control.hw_key->alg == ALG_TKIP)
txctl->keytype = ATH9K_KEY_TYPE_TKIP;
- else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
+ else if (tx_info->control.hw_key->alg == ALG_CCMP)
txctl->keytype = ATH9K_KEY_TYPE_AES;
}
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index fec5645944a..34ae125d538 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -43,23 +43,6 @@ static bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
return 0;
}
-/* Update the rfkill state */
-static void b43_rfkill_update_state(struct b43_wldev *dev)
-{
- struct b43_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!dev->radio_hw_enable) {
- rfk->rfkill->state = RFKILL_STATE_HARD_BLOCKED;
- return;
- }
-
- if (!dev->phy.radio_on)
- rfk->rfkill->state = RFKILL_STATE_SOFT_BLOCKED;
- else
- rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
-
-}
-
/* The poll callback for the hardware button. */
static void b43_rfkill_poll(struct input_polled_dev *poll_dev)
{
@@ -77,7 +60,6 @@ static void b43_rfkill_poll(struct input_polled_dev *poll_dev)
if (unlikely(enabled != dev->radio_hw_enable)) {
dev->radio_hw_enable = enabled;
report_change = 1;
- b43_rfkill_update_state(dev);
b43info(wl, "Radio hardware status changed to %s\n",
enabled ? "ENABLED" : "DISABLED");
}
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index 476add97e97..b32bf6a94f1 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -44,23 +44,6 @@ static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
return 0;
}
-/* Update the rfkill state */
-static void b43legacy_rfkill_update_state(struct b43legacy_wldev *dev)
-{
- struct b43legacy_rfkill *rfk = &(dev->wl->rfkill);
-
- if (!dev->radio_hw_enable) {
- rfk->rfkill->state = RFKILL_STATE_HARD_BLOCKED;
- return;
- }
-
- if (!dev->phy.radio_on)
- rfk->rfkill->state = RFKILL_STATE_SOFT_BLOCKED;
- else
- rfk->rfkill->state = RFKILL_STATE_UNBLOCKED;
-
-}
-
/* The poll callback for the hardware button. */
static void b43legacy_rfkill_poll(struct input_polled_dev *poll_dev)
{
@@ -78,7 +61,6 @@ static void b43legacy_rfkill_poll(struct input_polled_dev *poll_dev)
if (unlikely(enabled != dev->radio_hw_enable)) {
dev->radio_hw_enable = enabled;
report_change = 1;
- b43legacy_rfkill_update_state(dev);
b43legacyinfo(wl, "Radio hardware status changed to %s\n",
enabled ? "ENABLED" : "DISABLED");
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index da8b7433e3a..a60ae86bd5c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -58,6 +58,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
+ { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9c718583a23..77baff022f7 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/topology.h>
@@ -484,6 +485,21 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
#endif /* HAVE_PCI_LEGACY */
#ifdef HAVE_PCI_MMAP
+
+static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma)
+{
+ unsigned long nr, start, size;
+
+ nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ start = vma->vm_pgoff;
+ size = pci_resource_len(pdev, resno) >> PAGE_SHIFT;
+ if (start < size && size - start >= nr)
+ return 1;
+ WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
+ current->comm, start, start+nr, pci_name(pdev), resno, size);
+ return 0;
+}
+
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
@@ -510,6 +526,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
+ if (!pci_mmap_fits(pdev, i, vma))
+ return -EINVAL;
+
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
* different from the resource itself, arch will do necessary fixup.
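
The pci-sysfs.c hunk above rejects sysfs resource mmaps that extend past the BAR: pci_mmap_fits() compares the requested page count and page offset against the resource length in pages. A tiny self-contained sketch of that check, with made-up numbers rather than real BAR sizes:

/*
 * Sketch of the bounds check added in pci_mmap_fits(): a mapping of
 * nr pages starting at page offset start fits a BAR of size pages only
 * if it begins inside the BAR and does not run past its end.
 */
#include <stdio.h>

static int mmap_fits(unsigned long start, unsigned long nr, unsigned long size)
{
	return start < size && size - start >= nr;
}

int main(void)
{
	printf("%d\n", mmap_fits(0, 4, 16));	/* 1: 4 pages at offset 0 of 16 */
	printf("%d\n", mmap_fits(14, 4, 16));	/* 0: would run 2 pages past end */
	printf("%d\n", mmap_fits(20, 1, 16));	/* 0: offset beyond the BAR */
	return 0;
}

The two-part test (start < size && size - start >= nr) also helps avoid the unsigned overflow a naive start + nr <= size comparison could hit with a huge vm_pgoff.
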
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 9a7c9e1408a..851f5b83cdb 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -527,7 +527,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
*/
pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
&reg32);
- if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) {
+ if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
printk("Pre-1.1 PCIe device detected, "
"disable ASPM for %s. It can be enabled forcedly"
" with 'pcie_aspm=force'\n", pci_name(pdev));
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 3b3b5f17879..4edfc4731bd 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(pci_find_slot);
* time.
*/
struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
- const struct pci_dev *from)
+ struct pci_dev *from)
{
struct pci_dev *pdev;
@@ -263,7 +263,7 @@ static int match_pci_dev_by_id(struct device *dev, void *data)
* this file.
*/
static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
- const struct pci_dev *from)
+ struct pci_dev *from)
{
struct device *dev;
struct device *dev_start = NULL;
@@ -303,7 +303,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
*/
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
- const struct pci_dev *from)
+ struct pci_dev *from)
{
struct pci_dev *pdev;
struct pci_device_id *id;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 4174d9656e3..34c83d3ca0f 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -427,6 +427,18 @@ static int pcmcia_device_probe(struct device * dev)
p_drv = to_pcmcia_drv(dev->driver);
s = p_dev->socket;
+ /* The PCMCIA code passes the match data in via dev->driver_data,
+ * which is an ugly hack. Once the driver probe is called it may, and
+ * often will, overwrite the match data, so we must save it first.
+ *
+ * handle pseudo multifunction devices:
+ * there are at most two pseudo multifunction devices.
+ * if we're matching against the first, schedule a
+ * call which will then check whether there are two
+ * pseudo devices, and if not, add the second one.
+ */
+ did = p_dev->dev.driver_data;
+
ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
p_drv->drv.name);
@@ -455,21 +467,14 @@ static int pcmcia_device_probe(struct device * dev)
goto put_module;
}
- /* handle pseudo multifunction devices:
- * there are at most two pseudo multifunction devices.
- * if we're matching against the first, schedule a
- * call which will then check whether there are two
- * pseudo devices, and if not, add the second one.
- */
- did = p_dev->dev.driver_data;
if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
(p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
pcmcia_add_device_later(p_dev->socket, 0);
- put_module:
+put_module:
if (ret)
module_put(p_drv->owner);
- put_dev:
+put_dev:
if (ret)
put_device(dev);
return (ret);
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 26f5abc9c3f..e83f34f1b5b 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,12 +2,15 @@
# Makefile for the Linux Plug-and-Play Support.
#
-obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o system.o
+obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
obj-$(CONFIG_PNPACPI) += pnpacpi/
obj-$(CONFIG_PNPBIOS) += pnpbios/
obj-$(CONFIG_ISAPNP) += isapnp/
+# pnp_system_init goes after pnpacpi/pnpbios init
+obj-y += system.o
+
ifeq ($(CONFIG_PNP_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c1b9ea34977..53561d72b4e 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -268,7 +268,7 @@ static int __init pnpacpi_init(void)
return 0;
}
-subsys_initcall(pnpacpi_init);
+fs_initcall(pnpacpi_init);
static int __init pnpacpi_setup(char *str)
{
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 19a4be1a9a3..662dfcddedc 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -571,7 +571,7 @@ static int __init pnpbios_init(void)
return 0;
}
-subsys_initcall(pnpbios_init);
+fs_initcall(pnpbios_init);
static int __init pnpbios_thread_init(void)
{
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f118252f3a9..52e2743b04e 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -422,6 +422,12 @@ done:
return err;
}
+static int rtc_dev_fasync(int fd, struct file *file, int on)
+{
+ struct rtc_device *rtc = file->private_data;
+ return fasync_helper(fd, file, on, &rtc->async_queue);
+}
+
static int rtc_dev_release(struct inode *inode, struct file *file)
{
struct rtc_device *rtc = file->private_data;
@@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
if (rtc->ops->release)
rtc->ops->release(rtc->dev.parent);
+ if (file->f_flags & FASYNC)
+ rtc_dev_fasync(-1, file, 0);
+
clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
return 0;
}
-static int rtc_dev_fasync(int fd, struct file *file, int on)
-{
- struct rtc_device *rtc = file->private_data;
- return fasync_helper(fd, file, on, &rtc->async_queue);
-}
-
static const struct file_operations rtc_dev_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index acb78017e7d..0a225ccda02 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -215,7 +215,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
return rc;
}
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
- device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
+ device->debug_area = debug_register(dev_name(&device->cdev->dev), 1, 1,
8 * sizeof(long));
debug_register_view(device->debug_area, &debug_sprintf_view);
debug_set_level(device->debug_area, DBF_WARNING);
@@ -933,7 +933,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
MESSAGE(KERN_DEBUG,
"invalid status in handle_killed_request: "
"bus_id %s, status %02x",
- cdev->dev.bus_id, cqr->status);
+ dev_name(&cdev->dev), cqr->status);
return;
}
@@ -942,7 +942,7 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
device != dasd_device_from_cdev_locked(cdev) ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
return;
}
@@ -982,11 +982,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
break;
case -ETIMEDOUT:
printk(KERN_WARNING"%s(%s): request timed out\n",
- __func__, cdev->dev.bus_id);
+ __func__, dev_name(&cdev->dev));
break;
default:
printk(KERN_WARNING"%s(%s): unknown error %ld\n",
- __func__, cdev->dev.bus_id, PTR_ERR(irb));
+ __func__, dev_name(&cdev->dev), PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
return;
@@ -995,7 +995,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
now = get_clock();
DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
- cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
+ dev_name(&cdev->dev), ((irb->scsw.cmd.cstat << 8) |
irb->scsw.cmd.dstat), (unsigned int) intparm);
/* check for unsolicited interrupts */
@@ -1019,7 +1019,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (!device ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
return;
}
@@ -1037,7 +1037,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cqr->status != DASD_CQR_IN_IO) {
MESSAGE(KERN_DEBUG,
"invalid status: bus_id %s, status %02x",
- cdev->dev.bus_id, cqr->status);
+ dev_name(&cdev->dev), cqr->status);
return;
}
DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
@@ -2134,14 +2134,14 @@ int dasd_generic_probe(struct ccw_device *cdev,
if (ret) {
printk(KERN_WARNING
"dasd_generic_probe: could not set ccw-device options "
- "for %s\n", cdev->dev.bus_id);
+ "for %s\n", dev_name(&cdev->dev));
return ret;
}
ret = dasd_add_sysfs_files(cdev);
if (ret) {
printk(KERN_WARNING
"dasd_generic_probe: could not add sysfs entries "
- "for %s\n", cdev->dev.bus_id);
+ "for %s\n", dev_name(&cdev->dev));
return ret;
}
cdev->handler = &dasd_int_handler;
@@ -2152,13 +2152,13 @@ int dasd_generic_probe(struct ccw_device *cdev,
* initial probe.
*/
if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
- (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
+ (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
ret = ccw_device_set_online(cdev);
if (ret)
printk(KERN_WARNING
"dasd_generic_probe: could not initially "
"online ccw-device %s; return code: %d\n",
- cdev->dev.bus_id, ret);
+ dev_name(&cdev->dev), ret);
return 0;
}
@@ -2224,7 +2224,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
printk (KERN_WARNING
"dasd_generic couldn't online device %s "
"- discipline DIAG not available\n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
dasd_delete_device(device);
return -ENODEV;
}
@@ -2248,7 +2248,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
printk (KERN_WARNING
"dasd_generic couldn't online device %s "
"with discipline %s rc=%i\n",
- cdev->dev.bus_id, discipline->name, rc);
+ dev_name(&cdev->dev), discipline->name, rc);
module_put(discipline->owner);
module_put(base_discipline->owner);
dasd_delete_device(device);
@@ -2259,7 +2259,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
if (device->state <= DASD_STATE_KNOWN) {
printk (KERN_WARNING
"dasd_generic discipline not found for %s\n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
@@ -2267,7 +2267,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_delete_device(device);
} else
pr_debug("dasd_generic device %s found\n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
/* FIXME: we have to wait for the root device but we don't want
* to wait for each single device but for all at once. */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 5c6e6f331cb..b8f9c00633f 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1397,7 +1397,7 @@ static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
DEV_MESSAGE(KERN_ERR, cqr->startdev,
"ERP on alias device for request %p,"
" recover on base device %s", cqr,
- cqr->block->base->cdev->dev.bus_id);
+ dev_name(&cqr->block->base->cdev->dev));
}
dasd_eckd_reset_ccw_to_base_io(cqr);
erp->startdev = cqr->block->base;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index cd3335c1c30..921443b01d1 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -515,9 +515,9 @@ dasd_devmap_from_cdev(struct ccw_device *cdev)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
- devmap = dasd_add_busid(cdev->dev.bus_id,
+ devmap = dasd_add_busid(dev_name(&cdev->dev),
DASD_FEATURE_DEFAULT);
return devmap;
}
@@ -584,7 +584,7 @@ dasd_delete_device(struct dasd_device *device)
unsigned long flags;
/* First remove device pointer from devmap. */
- devmap = dasd_find_busid(device->cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&device->cdev->dev));
BUG_ON(IS_ERR(devmap));
spin_lock(&dasd_devmap_lock);
if (devmap->device != device) {
@@ -674,7 +674,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
int ro_flag;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
else
@@ -723,7 +723,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
int erplog;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
else
@@ -770,7 +770,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
int use_diag;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
else
@@ -876,7 +876,7 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
int alias;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
spin_unlock(&dasd_devmap_lock);
@@ -899,7 +899,7 @@ dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
char *vendor;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
vendor = devmap->uid.vendor;
@@ -924,7 +924,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
char ua_string[3];
struct dasd_uid *uid;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
spin_lock(&dasd_devmap_lock);
if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
spin_unlock(&dasd_devmap_lock);
@@ -972,7 +972,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
struct dasd_devmap *devmap;
int eer_flag;
- devmap = dasd_find_busid(dev->bus_id);
+ devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap) && devmap->device)
eer_flag = dasd_eer_enabled(devmap->device);
else
@@ -1034,7 +1034,7 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
@@ -1057,7 +1057,7 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
@@ -1077,7 +1077,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
@@ -1093,7 +1093,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
{
struct dasd_devmap *devmap;
- devmap = dasd_find_busid(cdev->dev.bus_id);
+ devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 773b3fe275b..49f9d221e23 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6,6 +6,8 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
+ * Author.........: Nigel Hislop <hislop_nigel@emc.com>
*
*/
@@ -84,7 +86,7 @@ dasd_eckd_probe (struct ccw_device *cdev)
if (ret) {
printk(KERN_WARNING
"dasd_eckd_probe: could not set ccw-device options "
- "for %s\n", cdev->dev.bus_id);
+ "for %s\n", dev_name(&cdev->dev));
return ret;
}
ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -1501,12 +1503,27 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
return;
}
- /* just report other unsolicited interrupts */
- DEV_MESSAGE(KERN_DEBUG, device, "%s",
- "unsolicited interrupt received");
- device->discipline->dump_sense(device, NULL, irb);
- dasd_schedule_device_bh(device);
+ if ((irb->scsw.cmd.cc == 1) &&
+ (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND) &&
+ (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND)) {
+ /* fake irb, do nothing; they are handled elsewhere */
+ dasd_schedule_device_bh(device);
+ return;
+ }
+
+ if (!(irb->esw.esw0.erw.cons)) {
+ /* just report other unsolicited interrupts */
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "unsolicited interrupt received");
+ } else {
+ DEV_MESSAGE(KERN_ERR, device, "%s",
+ "unsolicited interrupt received "
+ "(sense available)");
+ device->discipline->dump_sense(device, NULL, irb);
+ }
+ dasd_schedule_device_bh(device);
return;
};
@@ -2068,6 +2085,103 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
return 0;
}
+/*
+ * Issue syscall I/O to EMC Symmetrix array.
+ * CCWs are PSF and RSSD
+ */
+static int dasd_symm_io(struct dasd_device *device, void __user *argp)
+{
+ struct dasd_symmio_parms usrparm;
+ char *psf_data, *rssd_result;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ /* Copy parms from caller */
+ rc = -EFAULT;
+ if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
+ goto out;
+#ifndef CONFIG_64BIT
+ /* Make sure pointers are sane even on 31 bit. */
+ if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+#endif
+ /* alloc I/O data area */
+ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
+ rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
+ if (!psf_data || !rssd_result) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ /* get syscall header from user space */
+ rc = -EFAULT;
+ if (copy_from_user(psf_data,
+ (void __user *)(unsigned long) usrparm.psf_data,
+ usrparm.psf_data_len))
+ goto out_free;
+
+ /* sanity check on syscall header */
+ if (psf_data[0] != 0x17 || psf_data[1] != 0xce) {
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ /* setup CCWs for PSF + RSSD */
+ cqr = dasd_smalloc_request("ECKD", 2, 0, device);
+ if (IS_ERR(cqr)) {
+ DEV_MESSAGE(KERN_WARNING, device, "%s",
+ "Could not allocate initialization request");
+ rc = PTR_ERR(cqr);
+ goto out_free;
+ }
+
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->retries = 3;
+ cqr->expires = 10 * HZ;
+ cqr->buildclk = get_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ /* Build the ccws */
+ ccw = cqr->cpaddr;
+
+ /* PSF ccw */
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = usrparm.psf_data_len;
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->cda = (__u32)(addr_t) psf_data;
+
+ ccw++;
+
+ /* RSSD ccw */
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = usrparm.rssd_result_len;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t) rssd_result;
+
+ rc = dasd_sleep_on(cqr);
+ if (rc)
+ goto out_sfree;
+
+ rc = -EFAULT;
+ if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
+ rssd_result, usrparm.rssd_result_len))
+ goto out_sfree;
+ rc = 0;
+
+out_sfree:
+ dasd_sfree_request(cqr, cqr->memdev);
+out_free:
+ kfree(rssd_result);
+ kfree(psf_data);
+out:
+ DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
+ return rc;
+}
+
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
@@ -2086,6 +2200,8 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
return dasd_eckd_reserve(device);
case BIODASDSLCK:
return dasd_eckd_steal_lock(device);
+ case BIODASDSYMMIO:
+ return dasd_symm_io(device, argp);
default:
return -ENOIOCTLCMD;
}
@@ -2145,13 +2261,13 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
/* dump the sense data */
len = sprintf(page, KERN_ERR PRINTK_HEADER
" I/O status report for device %s:\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
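
For reference, a hedged user-space sketch of driving the new BIODASDSYMMIO ioctl added above. The field names follow struct dasd_symmio_parms as used by dasd_symm_io(); the header location and exact request layout are assumptions and should be checked against <asm/dasd.h>:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>	/* assumed to define BIODASDSYMMIO and struct dasd_symmio_parms */

/* issue a PSF/RSSD pair through a DASD block device node, e.g. /dev/dasdb */
static int symm_io(const char *node, void *psf, int psf_len,
		   void *rssd, int rssd_len)
{
	struct dasd_symmio_parms parms;
	int fd, rc;

	memset(&parms, 0, sizeof(parms));
	parms.psf_data = (unsigned long) psf;		/* PSF request, must start with 0x17 0xce */
	parms.psf_data_len = psf_len;
	parms.rssd_result = (unsigned long) rssd;	/* filled in by the RSSD CCW */
	parms.rssd_result_len = rssd_len;

	fd = open(node, O_RDONLY);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, BIODASDSYMMIO, &parms);
	close(fd);
	return rc;
}
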
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index bf512ac75b9..892e2878d61 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -309,7 +309,8 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
- strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
+ strncpy(header.busid, dev_name(&device->cdev->dev),
+ DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
@@ -349,7 +350,8 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
do_gettimeofday(&tv);
header.tv_sec = tv.tv_sec;
header.tv_usec = tv.tv_usec;
- strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
+ strncpy(header.busid, dev_name(&device->cdev->dev),
+ DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index aa0c533423a..93d9b6452a9 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -451,13 +451,13 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
}
len = sprintf(page, KERN_ERR PRINTK_HEADER
" I/O status report for device %s:\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, KERN_ERR PRINTK_HEADER
" device %s: Failing CCW: %p\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 31ecaa4a40e..489d5fe488f 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -126,7 +126,7 @@ do { \
#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- d_device->cdev->dev.bus_id, d_args); \
+ dev_name(&d_device->cdev->dev), d_args); \
DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
} while(0)
@@ -140,7 +140,7 @@ do { \
#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
do { \
printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- d_device->cdev->dev.bus_id, d_args); \
+ dev_name(&d_device->cdev->dev), d_args); \
} while(0)
#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 03c0e40a92f..9088de84b45 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -67,7 +67,7 @@ dasd_devices_show(struct seq_file *m, void *v)
return 0;
}
/* Print device number. */
- seq_printf(m, "%s", device->cdev->dev.bus_id);
+ seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
if (device != NULL && device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
@@ -76,7 +76,8 @@ dasd_devices_show(struct seq_file *m, void *v)
/* Print kdev. */
if (block->gdp)
seq_printf(m, " at (%3d:%6d)",
- block->gdp->major, block->gdp->first_minor);
+ MAJOR(disk_devt(block->gdp)),
+ MINOR(disk_devt(block->gdp)));
else
seq_printf(m, " at (???:??????)");
/* Print device name. */
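
A minimal sketch of the disk_devt() accessor the /proc hunk above switches to; 'gd' is assumed to be a live struct gendisk that has been added with add_disk():

#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/seq_file.h>

static void show_disk_numbers(struct seq_file *m, struct gendisk *gd)
{
	dev_t devt = disk_devt(gd);	/* replaces gd->major / gd->first_minor */

	seq_printf(m, " at (%3d:%6d)", MAJOR(devt), MINOR(devt));
}
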
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 711b3004b3e..a7ff167d5b8 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,6 @@
#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
-
static int dcssblk_open(struct inode *inode, struct file *filp);
static int dcssblk_release(struct inode *inode, struct file *filp);
static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
@@ -48,6 +47,30 @@ static struct block_device_operations dcssblk_devops = {
.direct_access = dcssblk_direct_access,
};
+struct dcssblk_dev_info {
+ struct list_head lh;
+ struct device dev;
+ char segment_name[BUS_ID_SIZE];
+ atomic_t use_count;
+ struct gendisk *gd;
+ unsigned long start;
+ unsigned long end;
+ int segment_type;
+ unsigned char save_pending;
+ unsigned char is_shared;
+ struct request_queue *dcssblk_queue;
+ int num_of_segments;
+ struct list_head seg_list;
+};
+
+struct segment_info {
+ struct list_head lh;
+ char segment_name[BUS_ID_SIZE];
+ unsigned long start;
+ unsigned long end;
+ int segment_type;
+};
+
static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
size_t count);
static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
@@ -58,30 +81,20 @@ static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *at
static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
size_t count);
static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t dcssblk_seglist_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
-static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
+static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
dcssblk_save_store);
-static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
+static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
dcssblk_shared_store);
+static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
static struct device *dcssblk_root_dev;
-struct dcssblk_dev_info {
- struct list_head lh;
- struct device dev;
- char segment_name[BUS_ID_SIZE];
- atomic_t use_count;
- struct gendisk *gd;
- unsigned long start;
- unsigned long end;
- int segment_type;
- unsigned char save_pending;
- unsigned char is_shared;
- struct request_queue *dcssblk_queue;
-};
-
static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;
@@ -91,8 +104,15 @@ static struct rw_semaphore dcssblk_devices_sem;
static void
dcssblk_release_segment(struct device *dev)
{
- PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
- kfree(container_of(dev, struct dcssblk_dev_info, dev));
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry, *temp;
+
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
+ list_del(&entry->lh);
+ kfree(entry);
+ }
+ kfree(dev_info);
module_put(THIS_MODULE);
}
@@ -114,7 +134,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
found = 0;
// test if minor available
list_for_each_entry(entry, &dcssblk_devices, lh)
- if (minor == entry->gd->first_minor)
+ if (minor == MINOR(disk_devt(entry->gd)))
found++;
if (!found) break; // got unused minor
}
@@ -142,6 +162,169 @@ dcssblk_get_device_by_name(char *name)
return NULL;
}
+/*
+ * get the struct segment_info from seg_list
+ * for the given name.
+ * down_read(&dcssblk_devices_sem) must be held.
+ */
+static struct segment_info *
+dcssblk_get_segment_by_name(char *name)
+{
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+
+ list_for_each_entry(dev_info, &dcssblk_devices, lh) {
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (!strcmp(name, entry->segment_name))
+ return entry;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * get the highest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
+{
+ unsigned long highest_addr;
+ struct segment_info *entry;
+
+ highest_addr = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (highest_addr < entry->end)
+ highest_addr = entry->end;
+ }
+ return highest_addr;
+}
+
+/*
+ * get the lowest address of the multi-segment block.
+ */
+static unsigned long
+dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
+{
+ int set_first;
+ unsigned long lowest_addr;
+ struct segment_info *entry;
+
+ set_first = 0;
+ lowest_addr = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (set_first == 0) {
+ lowest_addr = entry->start;
+ set_first = 1;
+ } else {
+ if (lowest_addr > entry->start)
+ lowest_addr = entry->start;
+ }
+ }
+ return lowest_addr;
+}
+
+/*
+ * Check continuity of segments.
+ */
+static int
+dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
+{
+ int i, j, rc;
+ struct segment_info *sort_list, *entry, temp;
+
+ if (dev_info->num_of_segments <= 1)
+ return 0;
+
+ sort_list = kzalloc(
+ sizeof(struct segment_info) * dev_info->num_of_segments,
+ GFP_KERNEL);
+ if (sort_list == NULL)
+ return -ENOMEM;
+ i = 0;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ memcpy(&sort_list[i], entry, sizeof(struct segment_info));
+ i++;
+ }
+
+ /* sort segments */
+ for (i = 0; i < dev_info->num_of_segments; i++)
+ for (j = 0; j < dev_info->num_of_segments; j++)
+ if (sort_list[j].start > sort_list[i].start) {
+ memcpy(&temp, &sort_list[i],
+ sizeof(struct segment_info));
+ memcpy(&sort_list[i], &sort_list[j],
+ sizeof(struct segment_info));
+ memcpy(&sort_list[j], &temp,
+ sizeof(struct segment_info));
+ }
+
+ /* check continuity */
+ for (i = 0; i < dev_info->num_of_segments - 1; i++) {
+ if ((sort_list[i].end + 1) != sort_list[i+1].start) {
+ PRINT_ERR("Segment %s is not contiguous with "
+ "segment %s\n",
+ sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
+ rc = -EINVAL;
+ goto out;
+ }
+ /* EN and EW are allowed in a block device */
+ if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
+ if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
+ (sort_list[i].segment_type == SEG_TYPE_ER) ||
+ !(sort_list[i+1].segment_type &
+ SEGMENT_EXCLUSIVE) ||
+ (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
+ PRINT_ERR("Segment %s has different type from "
+ "segment %s\n",
+ sort_list[i].segment_name,
+ sort_list[i+1].segment_name);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ rc = 0;
+out:
+ kfree(sort_list);
+ return rc;
+}
+
+/*
+ * Load a segment
+ */
+static int
+dcssblk_load_segment(char *name, struct segment_info **seg_info)
+{
+ int rc;
+
+ /* already loaded? */
+ down_read(&dcssblk_devices_sem);
+ *seg_info = dcssblk_get_segment_by_name(name);
+ up_read(&dcssblk_devices_sem);
+ if (*seg_info != NULL)
+ return -EEXIST;
+
+ /* get a struct segment_info */
+ *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
+ if (*seg_info == NULL)
+ return -ENOMEM;
+
+ strcpy((*seg_info)->segment_name, name);
+
+ /* load the segment */
+ rc = segment_load(name, SEGMENT_SHARED,
+ &(*seg_info)->start, &(*seg_info)->end);
+ if (rc < 0) {
+ segment_warning(rc, (*seg_info)->segment_name);
+ kfree(*seg_info);
+ } else {
+ INIT_LIST_HEAD(&(*seg_info)->lh);
+ (*seg_info)->segment_type = rc;
+ }
+ return rc;
+}
+
static void dcssblk_unregister_callback(struct device *dev)
{
device_unregister(dev);
@@ -165,6 +348,7 @@ static ssize_t
dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry, *temp;
int rc;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
@@ -172,46 +356,46 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
down_write(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
if (atomic_read(&dev_info->use_count)) {
- PRINT_ERR("share: segment %s is busy!\n",
- dev_info->segment_name);
rc = -EBUSY;
goto out;
}
if (inbuf[0] == '1') {
- // reload segment in shared mode
- rc = segment_modify_shared(dev_info->segment_name,
- SEGMENT_SHARED);
- if (rc < 0) {
- BUG_ON(rc == -EINVAL);
- if (rc != -EAGAIN)
- goto removeseg;
- } else {
- dev_info->is_shared = 1;
- switch (dev_info->segment_type) {
- case SEG_TYPE_SR:
- case SEG_TYPE_ER:
- case SEG_TYPE_SC:
- set_disk_ro(dev_info->gd,1);
+ /* reload segments in shared mode */
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ rc = segment_modify_shared(entry->segment_name,
+ SEGMENT_SHARED);
+ if (rc < 0) {
+ BUG_ON(rc == -EINVAL);
+ if (rc != -EAGAIN)
+ goto removeseg;
}
}
+ dev_info->is_shared = 1;
+ switch (dev_info->segment_type) {
+ case SEG_TYPE_SR:
+ case SEG_TYPE_ER:
+ case SEG_TYPE_SC:
+ set_disk_ro(dev_info->gd, 1);
+ }
} else if (inbuf[0] == '0') {
- // reload segment in exclusive mode
+ /* reload segments in exclusive mode */
if (dev_info->segment_type == SEG_TYPE_SC) {
PRINT_ERR("Segment type SC (%s) cannot be loaded in "
- "non-shared mode\n", dev_info->segment_name);
+ "non-shared mode\n", dev_info->segment_name);
rc = -EINVAL;
goto out;
}
- rc = segment_modify_shared(dev_info->segment_name,
- SEGMENT_EXCLUSIVE);
- if (rc < 0) {
- BUG_ON(rc == -EINVAL);
- if (rc != -EAGAIN)
- goto removeseg;
- } else {
- dev_info->is_shared = 0;
- set_disk_ro(dev_info->gd, 0);
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ rc = segment_modify_shared(entry->segment_name,
+ SEGMENT_EXCLUSIVE);
+ if (rc < 0) {
+ BUG_ON(rc == -EINVAL);
+ if (rc != -EAGAIN)
+ goto removeseg;
+ }
}
+ dev_info->is_shared = 0;
+ set_disk_ro(dev_info->gd, 0);
} else {
rc = -EINVAL;
goto out;
@@ -220,8 +404,14 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
goto out;
removeseg:
- PRINT_ERR("Could not reload segment %s, removing it now!\n",
- dev_info->segment_name);
+ PRINT_ERR("Could not reload segment(s) of the device %s, removing "
+ "segment(s) now!\n",
+ dev_info->segment_name);
+ temp = entry;
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ if (entry != temp)
+ segment_unload(entry->segment_name);
+ }
list_del(&dev_info->lh);
del_gendisk(dev_info->gd);
@@ -254,6 +444,7 @@ static ssize_t
dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
return -EINVAL;
@@ -263,14 +454,16 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
if (inbuf[0] == '1') {
if (atomic_read(&dev_info->use_count) == 0) {
// device is idle => we save immediately
- PRINT_INFO("Saving segment %s\n",
+ PRINT_INFO("Saving segment(s) of the device %s\n",
dev_info->segment_name);
- segment_save(dev_info->segment_name);
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ segment_save(entry->segment_name);
+ }
} else {
// device is busy => we save it when it becomes
// idle in dcssblk_release
- PRINT_INFO("Segment %s is currently busy, it will "
- "be saved when it becomes idle...\n",
+ PRINT_INFO("Device %s is currently busy, segment(s) "
+ "will be saved when it becomes idle...\n",
dev_info->segment_name);
dev_info->save_pending = 1;
}
@@ -279,7 +472,8 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
// device is busy & the user wants to undo his save
// request
dev_info->save_pending = 0;
- PRINT_INFO("Pending save for segment %s deactivated\n",
+ PRINT_INFO("Pending save for segment(s) of the device "
+ "%s deactivated\n",
dev_info->segment_name);
}
} else {
@@ -291,66 +485,123 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
}
/*
+ * device attribute for showing all segments in a device
+ */
+static ssize_t
+dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
+
+ down_read(&dcssblk_devices_sem);
+ dev_info = container_of(dev, struct dcssblk_dev_info, dev);
+ i = 0;
+ buf[0] = '\0';
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ strcpy(&buf[i], entry->segment_name);
+ i += strlen(entry->segment_name);
+ buf[i] = '\n';
+ i++;
+ }
+ up_read(&dcssblk_devices_sem);
+ return i;
+}
+
+/*
* device attribute for adding devices
*/
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- int rc, i;
+ int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
+ struct segment_info *seg_info, *temp;
char *local_buf;
unsigned long seg_byte_size;
dev_info = NULL;
+ seg_info = NULL;
if (dev != dcssblk_root_dev) {
rc = -EINVAL;
goto out_nobuf;
}
+ if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
+ rc = -ENAMETOOLONG;
+ goto out_nobuf;
+ }
+
local_buf = kmalloc(count + 1, GFP_KERNEL);
if (local_buf == NULL) {
rc = -ENOMEM;
goto out_nobuf;
}
+
/*
* parse input
*/
+ num_of_segments = 0;
for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
- local_buf[i] = toupper(buf[i]);
+ for (j = i; (buf[j] != ':') &&
+ (buf[j] != '\0') &&
+ (buf[j] != '\n') &&
+ j < count; j++) {
+ local_buf[j-i] = toupper(buf[j]);
+ }
+ local_buf[j-i] = '\0';
+ if (((j - i) == 0) || ((j - i) > 8)) {
+ rc = -ENAMETOOLONG;
+ goto seg_list_del;
+ }
+
+ rc = dcssblk_load_segment(local_buf, &seg_info);
+ if (rc < 0)
+ goto seg_list_del;
+ /*
+ * get a struct dcssblk_dev_info
+ */
+ if (num_of_segments == 0) {
+ dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
+ GFP_KERNEL);
+ if (dev_info == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ strcpy(dev_info->segment_name, local_buf);
+ dev_info->segment_type = seg_info->segment_type;
+ INIT_LIST_HEAD(&dev_info->seg_list);
+ }
+ list_add_tail(&seg_info->lh, &dev_info->seg_list);
+ num_of_segments++;
+ i = j;
+
+ if ((buf[j] == '\0') || (buf[j] == '\n'))
+ break;
}
- local_buf[i] = '\0';
- if ((i == 0) || (i > 8)) {
+
+ /* no trailing colon at the end of the input */
+ if ((i > 0) && (buf[i-1] == ':')) {
rc = -ENAMETOOLONG;
- goto out;
- }
- /*
- * already loaded?
- */
- down_read(&dcssblk_devices_sem);
- dev_info = dcssblk_get_device_by_name(local_buf);
- up_read(&dcssblk_devices_sem);
- if (dev_info != NULL) {
- PRINT_WARN("Segment %s already loaded!\n", local_buf);
- rc = -EEXIST;
- goto out;
- }
- /*
- * get a struct dcssblk_dev_info
- */
- dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
- if (dev_info == NULL) {
- rc = -ENOMEM;
- goto out;
+ goto seg_list_del;
}
+ strlcpy(local_buf, buf, i + 1);
+ dev_info->num_of_segments = num_of_segments;
+ rc = dcssblk_is_continuous(dev_info);
+ if (rc < 0)
+ goto seg_list_del;
+
+ dev_info->start = dcssblk_find_lowest_addr(dev_info);
+ dev_info->end = dcssblk_find_highest_addr(dev_info);
- strcpy(dev_info->segment_name, local_buf);
- strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
+ dev_set_name(&dev_info->dev, dev_info->segment_name);
dev_info->dev.release = dcssblk_release_segment;
INIT_LIST_HEAD(&dev_info->lh);
-
dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
if (dev_info->gd == NULL) {
rc = -ENOMEM;
- goto free_dev_info;
+ goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
dev_info->gd->fops = &dcssblk_devops;
@@ -360,59 +611,43 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
- /*
- * load the segment
- */
- rc = segment_load(local_buf, SEGMENT_SHARED,
- &dev_info->start, &dev_info->end);
- if (rc < 0) {
- segment_warning(rc, dev_info->segment_name);
- goto dealloc_gendisk;
- }
+
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
- PRINT_INFO("Loaded segment %s, size = %lu Byte, "
+ PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
"capacity = %lu (512 Byte) sectors\n", local_buf,
seg_byte_size, seg_byte_size >> 9);
- dev_info->segment_type = rc;
dev_info->save_pending = 0;
dev_info->is_shared = 1;
dev_info->dev.parent = dcssblk_root_dev;
/*
- * get minor, add to list
+ *get minor, add to list
*/
down_write(&dcssblk_devices_sem);
- if (dcssblk_get_device_by_name(local_buf)) {
- up_write(&dcssblk_devices_sem);
+ if (dcssblk_get_segment_by_name(local_buf)) {
rc = -EEXIST;
- goto unload_seg;
+ goto release_gd;
}
rc = dcssblk_assign_free_minor(dev_info);
- if (rc) {
- up_write(&dcssblk_devices_sem);
- PRINT_ERR("No free minor number available! "
- "Unloading segment...\n");
- goto unload_seg;
- }
+ if (rc)
+ goto release_gd;
sprintf(dev_info->gd->disk_name, "dcssblk%d",
- dev_info->gd->first_minor);
+ MINOR(disk_devt(dev_info->gd)));
list_add_tail(&dev_info->lh, &dcssblk_devices);
if (!try_module_get(THIS_MODULE)) {
rc = -ENODEV;
- goto list_del;
+ goto dev_list_del;
}
/*
* register the device
*/
rc = device_register(&dev_info->dev);
if (rc) {
- PRINT_ERR("Segment %s could not be registered RC=%d\n",
- local_buf, rc);
module_put(THIS_MODULE);
- goto list_del;
+ goto dev_list_del;
}
get_device(&dev_info->dev);
rc = device_create_file(&dev_info->dev, &dev_attr_shared);
@@ -421,6 +656,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = device_create_file(&dev_info->dev, &dev_attr_save);
if (rc)
goto unregister_dev;
+ rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
+ if (rc)
+ goto unregister_dev;
add_disk(dev_info->gd);
@@ -434,7 +672,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
set_disk_ro(dev_info->gd,0);
break;
}
- PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
up_write(&dcssblk_devices_sem);
rc = count;
goto out;
@@ -445,20 +682,27 @@ unregister_dev:
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
- segment_unload(dev_info->segment_name);
+ list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
+ segment_unload(seg_info->segment_name);
+ }
put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
goto out;
-list_del:
+dev_list_del:
list_del(&dev_info->lh);
- up_write(&dcssblk_devices_sem);
-unload_seg:
- segment_unload(local_buf);
-dealloc_gendisk:
+release_gd:
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
-free_dev_info:
+ up_write(&dcssblk_devices_sem);
+seg_list_del:
+ if (dev_info == NULL)
+ goto out;
+ list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
+ list_del(&seg_info->lh);
+ segment_unload(seg_info->segment_name);
+ kfree(seg_info);
+ }
kfree(dev_info);
out:
kfree(local_buf);
@@ -473,6 +717,7 @@ static ssize_t
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
int rc, i;
char *local_buf;
@@ -499,26 +744,28 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) {
up_write(&dcssblk_devices_sem);
- PRINT_WARN("Segment %s is not loaded!\n", local_buf);
+ PRINT_WARN("Device %s is not loaded!\n", local_buf);
rc = -ENODEV;
goto out_buf;
}
if (atomic_read(&dev_info->use_count) != 0) {
up_write(&dcssblk_devices_sem);
- PRINT_WARN("Segment %s is in use!\n", local_buf);
+ PRINT_WARN("Device %s is in use!\n", local_buf);
rc = -EBUSY;
goto out_buf;
}
- list_del(&dev_info->lh);
+ list_del(&dev_info->lh);
del_gendisk(dev_info->gd);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
- segment_unload(dev_info->segment_name);
- PRINT_DEBUG("Segment %s unloaded successfully\n",
- dev_info->segment_name);
+
+ /* unload all related segments */
+ list_for_each_entry(entry, &dev_info->seg_list, lh)
+ segment_unload(entry->segment_name);
+
put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
@@ -550,6 +797,7 @@ static int
dcssblk_release(struct inode *inode, struct file *filp)
{
struct dcssblk_dev_info *dev_info;
+ struct segment_info *entry;
int rc;
dev_info = inode->i_bdev->bd_disk->private_data;
@@ -560,9 +808,11 @@ dcssblk_release(struct inode *inode, struct file *filp)
down_write(&dcssblk_devices_sem);
if (atomic_dec_and_test(&dev_info->use_count)
&& (dev_info->save_pending)) {
- PRINT_INFO("Segment %s became idle and is being saved now\n",
+ PRINT_INFO("Device %s became idle and is being saved now\n",
dev_info->segment_name);
- segment_save(dev_info->segment_name);
+ list_for_each_entry(entry, &dev_info->seg_list, lh) {
+ segment_save(entry->segment_name);
+ }
dev_info->save_pending = 0;
}
up_write(&dcssblk_devices_sem);
@@ -602,7 +852,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
case SEG_TYPE_SC:
/* cannot write to these segments */
if (bio_data_dir(bio) == WRITE) {
- PRINT_WARN("rejecting write to ro segment %s\n", dev_info->dev.bus_id);
+ PRINT_WARN("rejecting write to ro device %s\n",
+ dev_name(&dev_info->dev));
goto fail;
}
}
@@ -657,7 +908,7 @@ static void
dcssblk_check_params(void)
{
int rc, i, j, k;
- char buf[9];
+ char buf[DCSSBLK_PARM_LEN + 1];
struct dcssblk_dev_info *dev_info;
for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
@@ -665,15 +916,16 @@ dcssblk_check_params(void)
for (j = i; (dcssblk_segments[j] != ',') &&
(dcssblk_segments[j] != '\0') &&
(dcssblk_segments[j] != '(') &&
- (j - i) < 8; j++)
+ (j < DCSSBLK_PARM_LEN); j++)
{
buf[j-i] = dcssblk_segments[j];
}
buf[j-i] = '\0';
rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
- for (k = 0; buf[k] != '\0'; k++)
+ for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
buf[k] = toupper(buf[k]);
+ buf[k] = '\0';
if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
down_read(&dcssblk_devices_sem);
dev_info = dcssblk_get_device_by_name(buf);
@@ -740,10 +992,12 @@ module_exit(dcssblk_exit);
module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
- "comma-separated list, each name max. 8 chars.\n"
- "Adding \"(local)\" to segment name equals echoing 0 to "
- "/sys/devices/dcssblk/<segment name>/shared after loading "
- "the segment - \n"
- "e.g. segments=\"mydcss1,mydcss2,mydcss3(local)\"");
+ "comma-separated list, names in each set separated "
+ "by commas are separated by colons, each set contains "
+ "names of contiguous segments and each name max. 8 chars.\n"
+ "Adding \"(local)\" to the end of each set equals echoing 0 "
+ "to /sys/devices/dcssblk/<device name>/shared after loading "
+ "the contiguous segments - \n"
+ "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
MODULE_LICENSE("GPL");
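
A hedged user-space sketch of the new colon-separated dcssblk syntax; the sysfs path is an assumption based on the driver's root device name, and the segment names are placeholders:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* create one dcssblk device from a set of contiguous segments */
static int dcssblk_add(const char *segspec)	/* e.g. "MYDCSS1:MYDCSS2" */
{
	int fd = open("/sys/devices/dcssblk/add", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, segspec, strlen(segspec));
	close(fd);
	return n < 0 ? -1 : 0;
}
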
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index dd9b986389a..03916989ed2 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -56,6 +56,7 @@ typedef struct {
static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];
+static struct request_queue *xpram_queues[XPRAM_MAX_DEVS];
static unsigned int xpram_pages;
static int xpram_devs;
@@ -330,18 +331,22 @@ static int __init xpram_setup_sizes(unsigned long pages)
return 0;
}
-static struct request_queue *xpram_queue;
-
static int __init xpram_setup_blkdev(void)
{
unsigned long offset;
int i, rc = -ENOMEM;
for (i = 0; i < xpram_devs; i++) {
- struct gendisk *disk = alloc_disk(1);
- if (!disk)
+ xpram_disks[i] = alloc_disk(1);
+ if (!xpram_disks[i])
+ goto out;
+ xpram_queues[i] = blk_alloc_queue(GFP_KERNEL);
+ if (!xpram_queues[i]) {
+ put_disk(xpram_disks[i]);
goto out;
- xpram_disks[i] = disk;
+ }
+ blk_queue_make_request(xpram_queues[i], xpram_make_request);
+ blk_queue_hardsect_size(xpram_queues[i], 4096);
}
/*
@@ -352,18 +357,6 @@ static int __init xpram_setup_blkdev(void)
goto out;
/*
- * Assign the other needed values: make request function, sizes and
- * hardsect size. All the minor devices feature the same value.
- */
- xpram_queue = blk_alloc_queue(GFP_KERNEL);
- if (!xpram_queue) {
- rc = -ENOMEM;
- goto out_unreg;
- }
- blk_queue_make_request(xpram_queue, xpram_make_request);
- blk_queue_hardsect_size(xpram_queue, 4096);
-
- /*
* Setup device structures.
*/
offset = 0;
@@ -377,18 +370,18 @@ static int __init xpram_setup_blkdev(void)
disk->first_minor = i;
disk->fops = &xpram_devops;
disk->private_data = &xpram_devices[i];
- disk->queue = xpram_queue;
+ disk->queue = xpram_queues[i];
sprintf(disk->disk_name, "slram%d", i);
set_capacity(disk, xpram_sizes[i] << 1);
add_disk(disk);
}
return 0;
-out_unreg:
- unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
out:
- while (i--)
+ while (i--) {
+ blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
+ }
return rc;
}
@@ -400,10 +393,10 @@ static void __exit xpram_exit(void)
int i;
for (i = 0; i < xpram_devs; i++) {
del_gendisk(xpram_disks[i]);
+ blk_cleanup_queue(xpram_queues[i]);
put_disk(xpram_disks[i]);
}
unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
- blk_cleanup_queue(xpram_queue);
}
static int __init xpram_init(void)
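
A reduced sketch of the per-disk queue pattern the xpram hunks switch to (one request_queue per gendisk instead of a single shared queue); the make_request stub and the error handling are simplified for illustration only:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/genhd.h>

static int sketch_make_request(struct request_queue *q, struct bio *bio)
{
	bio_endio(bio, 0);	/* placeholder: a real driver transfers data here */
	return 0;
}

static int sketch_setup_one(struct gendisk **gdp, struct request_queue **qp)
{
	*gdp = alloc_disk(1);
	if (!*gdp)
		return -ENOMEM;
	*qp = blk_alloc_queue(GFP_KERNEL);
	if (!*qp) {
		put_disk(*gdp);
		return -ENOMEM;
	}
	blk_queue_make_request(*qp, sketch_make_request);
	(*gdp)->queue = *qp;	/* each disk now owns its own queue */
	return 0;
}

/* exit path for disks that were registered with add_disk() */
static void sketch_teardown_one(struct gendisk *gd, struct request_queue *q)
{
	del_gendisk(gd);
	blk_cleanup_queue(q);	/* cleaned up per device, not once globally */
	put_disk(gd);
}
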
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index d3ec9b55ab3..9ab06e0dad4 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -21,6 +21,7 @@
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
@@ -88,7 +89,6 @@ struct raw3215_info {
int count; /* number of bytes in output buffer */
int written; /* number of bytes in write requests */
struct tty_struct *tty; /* pointer to tty structure if present */
- struct tasklet_struct tasklet;
struct raw3215_req *queued_read; /* pointer to queued read requests */
struct raw3215_req *queued_write;/* pointer to queued write requests */
wait_queue_head_t empty_wait; /* wait queue for flushing */
@@ -341,21 +341,14 @@ raw3215_try_io(struct raw3215_info *raw)
}
/*
- * The bottom half handler routine for 3215 devices. It tries to start
- * the next IO and wakes up processes waiting on the tty.
+ * Try to start the next IO and wake up processes waiting on the tty.
*/
-static void
-raw3215_tasklet(void *data)
+static void raw3215_next_io(struct raw3215_info *raw)
{
- struct raw3215_info *raw;
struct tty_struct *tty;
- unsigned long flags;
- raw = (struct raw3215_info *) data;
- spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
- spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
tty = raw->tty;
if (tty != NULL &&
RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
@@ -380,7 +373,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
if (cstat != 0)
- tasklet_schedule(&raw->tasklet);
+ raw3215_next_io(raw);
if (dstat & 0x01) { /* we got a unit exception */
dstat &= ~0x01; /* we can ignore it */
}
@@ -390,7 +383,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break;
/* Attention interrupt, someone hit the enter key */
raw3215_mk_read_req(raw);
- tasklet_schedule(&raw->tasklet);
+ raw3215_next_io(raw);
break;
case 0x08:
case 0x0C:
@@ -448,7 +441,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
raw->queued_read == NULL) {
wake_up_interruptible(&raw->empty_wait);
}
- tasklet_schedule(&raw->tasklet);
+ raw3215_next_io(raw);
break;
default:
/* Strange interrupt, I'll do my best to clean up */
@@ -460,7 +453,7 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
raw->flags &= ~RAW3215_WORKING;
raw3215_free_req(req);
}
- tasklet_schedule(&raw->tasklet);
+ raw3215_next_io(raw);
}
return;
}
@@ -674,9 +667,6 @@ raw3215_probe (struct ccw_device *cdev)
kfree(raw);
return -ENOMEM;
}
- tasklet_init(&raw->tasklet,
- (void (*)(unsigned long)) raw3215_tasklet,
- (unsigned long) raw);
init_waitqueue_head(&raw->empty_wait);
cdev->dev.driver_data = raw;
@@ -775,11 +765,11 @@ static struct tty_driver *con3215_device(struct console *c, int *index)
}
/*
- * panic() calls console_unblank before the system enters a
- * disabled, endless loop.
+ * panic() calls con3215_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
*/
static void
-con3215_unblank(void)
+con3215_flush(void)
{
struct raw3215_info *raw;
unsigned long flags;
@@ -790,6 +780,23 @@ con3215_unblank(void)
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
+static int con3215_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ con3215_flush();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+ .notifier_call = con3215_notify,
+ .priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+ .notifier_call = con3215_notify,
+ .priority = 0,
+};
+
/*
* The console structure for the 3215 console
*/
@@ -797,7 +804,6 @@ static struct console con3215 = {
.name = "ttyS",
.write = con3215_write,
.device = con3215_device,
- .unblank = con3215_unblank,
.flags = CON_PRINTBUFFER,
};
@@ -846,9 +852,6 @@ con3215_init(void)
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
- tasklet_init(&raw->tasklet,
- (void (*)(unsigned long)) raw3215_tasklet,
- (unsigned long) raw);
init_waitqueue_head(&raw->empty_wait);
/* Request the console irq */
@@ -859,6 +862,8 @@ con3215_init(void)
raw3215[0] = NULL;
return -ENODEV;
}
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+ register_reboot_notifier(&on_reboot_nb);
register_console(&con3215);
return 0;
}
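
The con3215 hunks above and the console drivers that follow repeat the same conversion: the console .unblank hook is dropped in favour of flushing from the panic and reboot notifier chains. A condensed sketch of the pattern, with the flush body left driver specific:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static void sketch_console_flush(void)
{
	/* driver specific: push buffered console output out to the hardware */
}

static int sketch_console_notify(struct notifier_block *self,
				 unsigned long event, void *data)
{
	sketch_console_flush();
	return NOTIFY_OK;
}

static struct notifier_block sketch_panic_nb = {
	.notifier_call = sketch_console_notify,
};

static struct notifier_block sketch_reboot_nb = {
	.notifier_call = sketch_console_notify,
};

static int __init sketch_console_init(void)
{
	/* replaces the old console->unblank callback */
	atomic_notifier_chain_register(&panic_notifier_list, &sketch_panic_nb);
	register_reboot_notifier(&sketch_reboot_nb);
	return 0;
}
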
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 3c07974886e..d028d2ee83d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/reboot.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
@@ -528,11 +529,11 @@ con3270_wait_write(struct con3270 *cp)
}
/*
- * panic() calls console_unblank before the system enters a
- * disabled, endless loop.
+ * panic() calls con3270_flush through a panic_notifier
+ * before the system enters a disabled, endless loop.
*/
static void
-con3270_unblank(void)
+con3270_flush(void)
{
struct con3270 *cp;
unsigned long flags;
@@ -554,6 +555,23 @@ con3270_unblank(void)
spin_unlock_irqrestore(&cp->view.lock, flags);
}
+static int con3270_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ con3270_flush();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+ .notifier_call = con3270_notify,
+ .priority = 0,
+};
+
+static struct notifier_block on_reboot_nb = {
+ .notifier_call = con3270_notify,
+ .priority = 0,
+};
+
/*
* The console structure for the 3270 console
*/
@@ -561,7 +579,6 @@ static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
.device = con3270_device,
- .unblank = con3270_unblank,
.flags = CON_PRINTBUFFER,
};
@@ -623,6 +640,8 @@ con3270_init(void)
condev->cline->len = 0;
con3270_create_status(condev);
condev->input = alloc_string(&condev->freemem, 80);
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+ register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index c3dee900a5c..1792b2c0130 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1171,7 +1171,7 @@ static int raw3270_create_attributes(struct raw3270 *rp)
rp->clttydev = device_create_drvdata(class3270, &rp->cdev->dev,
MKDEV(IBM_TTY3270_MAJOR, rp->minor),
NULL,
- "tty%s", rp->cdev->dev.bus_id);
+ "tty%s", dev_name(&rp->cdev->dev));
if (IS_ERR(rp->clttydev)) {
rc = PTR_ERR(rp->clttydev);
goto out_ttydev;
@@ -1180,7 +1180,7 @@ static int raw3270_create_attributes(struct raw3270 *rp)
rp->cltubdev = device_create_drvdata(class3270, &rp->cdev->dev,
MKDEV(IBM_FS3270_MAJOR, rp->minor),
NULL,
- "tub%s", rp->cdev->dev.bus_id);
+ "tub%s", dev_name(&rp->cdev->dev));
if (!IS_ERR(rp->cltubdev))
goto out;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 7e619c534bf..9a25c4bd142 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -16,6 +16,7 @@
#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
+#include <linux/reboot.h>
#include "sclp.h"
#include "sclp_rw.h"
@@ -172,7 +173,7 @@ sclp_console_device(struct console *c, int *index)
* will be flushed to the SCLP.
*/
static void
-sclp_console_unblank(void)
+sclp_console_flush(void)
{
unsigned long flags;
@@ -188,6 +189,24 @@ sclp_console_unblank(void)
spin_unlock_irqrestore(&sclp_con_lock, flags);
}
+static int
+sclp_console_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ sclp_console_flush();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block on_panic_nb = {
+ .notifier_call = sclp_console_notify,
+ .priority = 1,
+};
+
+static struct notifier_block on_reboot_nb = {
+ .notifier_call = sclp_console_notify,
+ .priority = 1,
+};
+
/*
* used to register the SCLP console to the kernel and to
* give printk necessary information
@@ -197,7 +216,6 @@ static struct console sclp_console =
.name = sclp_console_name,
.write = sclp_console_write,
.device = sclp_console_device,
- .unblank = sclp_console_unblank,
.flags = CON_PRINTBUFFER,
.index = 0 /* ttyS0 */
};
@@ -241,6 +259,8 @@ sclp_console_init(void)
sclp_con_width_htab = 8;
/* enable printk-access to this driver */
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+ register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_console);
return 0;
}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index ad51738c426..9854f19f5e6 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -24,6 +24,8 @@
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/reboot.h>
+
#include <asm/uaccess.h>
#include "sclp.h"
@@ -743,24 +745,30 @@ sclp_vt220_con_device(struct console *c, int *index)
return sclp_vt220_driver;
}
-/*
- * This routine is called from panic when the kernel is going to give up.
- * We have to make sure that all buffers will be flushed to the SCLP.
- * Note that this function may be called from within an interrupt context.
- */
-static void
-sclp_vt220_con_unblank(void)
+static int
+sclp_vt220_notify(struct notifier_block *self,
+ unsigned long event, void *data)
{
__sclp_vt220_flush_buffer();
+ return NOTIFY_OK;
}
+static struct notifier_block on_panic_nb = {
+ .notifier_call = sclp_vt220_notify,
+ .priority = 1,
+};
+
+static struct notifier_block on_reboot_nb = {
+ .notifier_call = sclp_vt220_notify,
+ .priority = 1,
+};
+
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
.name = SCLP_VT220_CONSOLE_NAME,
.write = sclp_vt220_con_write,
.device = sclp_vt220_con_device,
- .unblank = sclp_vt220_con_unblank,
.flags = CON_PRINTBUFFER,
.index = SCLP_VT220_CONSOLE_INDEX
};
@@ -776,6 +784,8 @@ sclp_vt220_con_init(void)
if (rc)
return rc;
/* Attach linux console */
+ atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
+ register_reboot_notifier(&on_reboot_nb);
register_console(&sclp_vt220_console);
return 0;
}
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 839987618ff..4005c44a404 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -910,7 +910,7 @@ tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
* should proceed with the new tape... this
* should probably be done in user space!
*/
- PRINT_WARN("(%s): Swap Tape Device!\n", device->cdev->dev.bus_id);
+ PRINT_WARN("(%s): Swap Tape Device!\n", dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EIO);
}
@@ -1003,40 +1003,43 @@ tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
/* Exception Message */
switch (sense->fmt.f70.emc) {
case 0x02:
- PRINT_WARN("(%s): Data degraded\n", device->cdev->dev.bus_id);
+ PRINT_WARN("(%s): Data degraded\n",
+ dev_name(&device->cdev->dev));
break;
case 0x03:
PRINT_WARN("(%s): Data degraded in partion %i\n",
- device->cdev->dev.bus_id, sense->fmt.f70.mp);
+ dev_name(&device->cdev->dev), sense->fmt.f70.mp);
break;
case 0x04:
- PRINT_WARN("(%s): Medium degraded\n", device->cdev->dev.bus_id);
+ PRINT_WARN("(%s): Medium degraded\n",
+ dev_name(&device->cdev->dev));
break;
case 0x05:
PRINT_WARN("(%s): Medium degraded in partition %i\n",
- device->cdev->dev.bus_id, sense->fmt.f70.mp);
+ dev_name(&device->cdev->dev), sense->fmt.f70.mp);
break;
case 0x06:
- PRINT_WARN("(%s): Block 0 Error\n", device->cdev->dev.bus_id);
+ PRINT_WARN("(%s): Block 0 Error\n",
+ dev_name(&device->cdev->dev));
break;
case 0x07:
PRINT_WARN("(%s): Medium Exception 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f70.md);
+ dev_name(&device->cdev->dev), sense->fmt.f70.md);
break;
default:
PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f70.emc);
+ dev_name(&device->cdev->dev), sense->fmt.f70.emc);
break;
}
/* Service Message */
switch (sense->fmt.f70.smc) {
case 0x02:
PRINT_WARN("(%s): Reference Media maintenance procedure %i\n",
- device->cdev->dev.bus_id, sense->fmt.f70.md);
+ dev_name(&device->cdev->dev), sense->fmt.f70.md);
break;
default:
PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f70.smc);
+ dev_name(&device->cdev->dev), sense->fmt.f70.smc);
break;
}
}
@@ -1054,101 +1057,101 @@ tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
switch (sense->fmt.f71.emc) {
case 0x01:
PRINT_WARN("(%s): Effect of failure is unknown\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x02:
PRINT_WARN("(%s): CU Exception - no performance impact\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x03:
PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x04:
PRINT_WARN("(%s): CU Exception on device path 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x05:
PRINT_WARN("(%s): CU Exception on library path 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x06:
PRINT_WARN("(%s): CU Exception on node 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x07:
PRINT_WARN("(%s): CU Exception on partition 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
default:
PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.emc);
+ dev_name(&device->cdev->dev), sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
PRINT_WARN("(%s): Repair impact is unknown\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x02:
PRINT_WARN("(%s): Repair will not impact cu performance\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable node "
"0x%x on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable nodes "
"(0x%x-0x%x) on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable cannel path "
"0x%x on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable cannel paths "
"(0x%x-0x%x) on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable device path "
"0x%x on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable device paths "
"(0x%x-0x%x) on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x06:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable library path "
"0x%x on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable library paths "
"(0x%x-0x%x) on CU\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
PRINT_WARN("(%s): Repair will disable access to CU\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
default:
PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.smc);
+ dev_name(&device->cdev->dev), sense->fmt.f71.smc);
}
}
@@ -1165,104 +1168,104 @@ tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
switch (sense->fmt.f71.emc) {
case 0x01:
PRINT_WARN("(%s): Effect of failure is unknown\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x02:
PRINT_WARN("(%s): DV Exception - no performance impact\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x03:
PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x04:
PRINT_WARN("(%s): DV Exception on loader 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x05:
PRINT_WARN("(%s): DV Exception on message display 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.md[0]);
+ dev_name(&device->cdev->dev), sense->fmt.f71.md[0]);
break;
case 0x06:
PRINT_WARN("(%s): DV Exception in tape path\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x07:
PRINT_WARN("(%s): DV Exception in drive\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
default:
PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.emc);
+ dev_name(&device->cdev->dev), sense->fmt.f71.emc);
}
/* Service Message */
switch (sense->fmt.f71.smc) {
case 0x01:
PRINT_WARN("(%s): Repair impact is unknown\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x02:
PRINT_WARN("(%s): Repair will not impact device performance\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x03:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable channel path "
"0x%x on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable channel path "
"(0x%x-0x%x) on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x04:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable interface 0x%x "
"on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable interfaces "
"(0x%x-0x%x) on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x05:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable loader 0x%x "
"on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable loader "
"(0x%x-0x%x) on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x07:
PRINT_WARN("(%s): Repair will disable access to DV\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case 0x08:
if (sense->fmt.f71.mdf == 0)
PRINT_WARN("(%s): Repair will disable message "
"display 0x%x on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1]);
else
PRINT_WARN("(%s): Repair will disable message "
"displays (0x%x-0x%x) on DV\n",
- device->cdev->dev.bus_id,
+ dev_name(&device->cdev->dev),
sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
break;
case 0x09:
- PRINT_WARN("(%s): Clean DV\n", device->cdev->dev.bus_id);
+ PRINT_WARN("(%s): Clean DV\n", dev_name(&device->cdev->dev));
break;
default:
PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.smc);
+ dev_name(&device->cdev->dev), sense->fmt.f71.smc);
}
}
@@ -1279,18 +1282,18 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
return;
if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
if (tape_3590_msg[sense->mc] != NULL)
- PRINT_WARN("(%s): %s\n", device->cdev->dev.bus_id,
+ PRINT_WARN("(%s): %s\n", dev_name(&device->cdev->dev),
tape_3590_msg[sense->mc]);
else {
PRINT_WARN("(%s): Message Code 0x%x\n",
- device->cdev->dev.bus_id, sense->mc);
+ dev_name(&device->cdev->dev), sense->mc);
}
return;
}
if (sense->mc == 0xf0) {
/* Standard Media Information Message */
PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, "
- "RC=%02x-%04x-%02x\n", device->cdev->dev.bus_id,
+ "RC=%02x-%04x-%02x\n", dev_name(&device->cdev->dev),
sense->fmt.f70.sev, sense->mc,
sense->fmt.f70.emc, sense->fmt.f70.smc,
sense->fmt.f70.refcode, sense->fmt.f70.mid,
@@ -1302,7 +1305,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
/* Standard I/O Subsystem Service Information Message */
PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, "
"MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.sev,
+ dev_name(&device->cdev->dev), sense->fmt.f71.sev,
device->cdev->id.dev_model,
sense->mc, sense->fmt.f71.emc,
sense->fmt.f71.smc, sense->fmt.f71.refcode1,
@@ -1314,7 +1317,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
/* Standard Device Service Information Message */
PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, "
"MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
- device->cdev->dev.bus_id, sense->fmt.f71.sev,
+ dev_name(&device->cdev->dev), sense->fmt.f71.sev,
device->cdev->id.dev_model,
sense->mc, sense->fmt.f71.emc,
sense->fmt.f71.smc, sense->fmt.f71.refcode1,
@@ -1327,7 +1330,7 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
return;
}
PRINT_WARN("(%s): Device Message(%x)\n",
- device->cdev->dev.bus_id, sense->mc);
+ dev_name(&device->cdev->dev), sense->mc);
}
static int tape_3590_crypt_error(struct tape_device *device,
@@ -1336,10 +1339,11 @@ static int tape_3590_crypt_error(struct tape_device *device,
u8 cu_rc, ekm_rc1;
u16 ekm_rc2;
u32 drv_rc;
- char *bus_id, *sense;
+ const char *bus_id;
+ char *sense;
sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
- bus_id = device->cdev->dev.bus_id;
+ bus_id = dev_name(&device->cdev->dev);
cu_rc = sense[0];
drv_rc = *((u32*) &sense[5]) & 0xffffff;
ekm_rc1 = sense[9];
@@ -1440,7 +1444,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
* "device intervention" is not very meaningfull
*/
PRINT_WARN("(%s): Tape operation when medium not loaded\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
tape_med_state_set(device, MS_UNLOADED);
tape_3590_schedule_work(device, TO_CRYPT_OFF);
return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
@@ -1487,18 +1491,18 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
case 0x6020:
PRINT_WARN("(%s): Cartridge of wrong type ?\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);
case 0x8011:
PRINT_WARN("(%s): Another host has reserved the tape device\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EPERM);
case 0x8013:
PRINT_WARN("(%s): Another host has privileged access to the "
- "tape device\n", device->cdev->dev.bus_id);
+ "tape device\n", dev_name(&device->cdev->dev));
PRINT_WARN("(%s): To solve the problem unload the current "
- "cartridge!\n", device->cdev->dev.bus_id);
+ "cartridge!\n", dev_name(&device->cdev->dev));
return tape_3590_erp_basic(device, request, irb, -EPERM);
default:
return tape_3590_erp_basic(device, request, irb, -EIO);
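The hunks above all apply the same conversion: the struct device field bus_id is no longer read directly, the dev_name() accessor is used instead. A minimal sketch of that pattern, assuming a generic device pointer (report_event() and its message are illustrative only, not part of the tape driver):

#include <linux/device.h>
#include <linux/kernel.h>

/* illustrative only: not a function in tape_3590.c */
static void report_event(struct device *dev)
{
	/* old style, removed by this series:
	 *	printk(KERN_WARNING "(%s): ...\n", dev->bus_id);
	 * new style, works for statically and dynamically named devices: */
	printk(KERN_WARNING "(%s): device reported an event\n",
	       dev_name(dev));
}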
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 95da72bc17e..a25b8bf54f4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -278,7 +278,7 @@ tapeblock_cleanup_device(struct tape_device *device)
if (!device->blk_data.disk) {
PRINT_ERR("(%s): No gendisk to clean up!\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
goto cleanup_queue;
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 181a5441af1..d7073dbf825 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -215,12 +215,12 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
PRINT_INFO("(%s): Tape is unloaded\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
PRINT_INFO("(%s): Tape has been mounted\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
break;
default:
// print nothing
@@ -415,7 +415,7 @@ tape_generic_offline(struct tape_device *device)
device->cdev_id);
PRINT_WARN("(%s): Set offline failed "
"- drive in use.\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return -EBUSY;
}
@@ -538,7 +538,8 @@ tape_generic_probe(struct ccw_device *cdev)
ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
if (ret) {
tape_put_device(device);
- PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
+ PRINT_ERR("probe failed for tape device %s\n",
+ dev_name(&cdev->dev));
return ret;
}
cdev->dev.driver_data = device;
@@ -546,7 +547,7 @@ tape_generic_probe(struct ccw_device *cdev)
device->cdev = cdev;
ccw_device_get_id(cdev, &dev_id);
device->cdev_id = devid_to_int(&dev_id);
- PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
+ PRINT_INFO("tape device %s found\n", dev_name(&cdev->dev));
return ret;
}
@@ -616,7 +617,7 @@ tape_generic_remove(struct ccw_device *cdev)
device->cdev_id);
PRINT_WARN("(%s): Drive in use vanished - "
"expect trouble!\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
PRINT_WARN("State was %i\n", device->tape_state);
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
@@ -840,7 +841,7 @@ tape_dump_sense(struct tape_device* device, struct tape_request *request,
PRINT_INFO("-------------------------------------------------\n");
PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
irb->scsw.cmd.dstat, irb->scsw.cmd.cstat, irb->scsw.cmd.cpa);
- PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
+ PRINT_INFO("DEVICE: %s\n", dev_name(&device->cdev->dev));
if (request != NULL)
PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);
@@ -1051,7 +1052,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
device = (struct tape_device *) cdev->dev.driver_data;
if (device == NULL) {
PRINT_ERR("could not get device structure for %s "
- "in interrupt\n", cdev->dev.bus_id);
+ "in interrupt\n", dev_name(&cdev->dev));
return;
}
request = (struct tape_request *) intparm;
@@ -1064,13 +1065,13 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
switch (PTR_ERR(irb)) {
case -ETIMEDOUT:
PRINT_WARN("(%s): Request timed out\n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
case -EIO:
__tape_end_request(device, request, -EIO);
break;
default:
PRINT_ERR("(%s): Unexpected i/o error %li\n",
- cdev->dev.bus_id,
+ dev_name(&cdev->dev),
PTR_ERR(irb));
}
return;
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index e7c888c14e7..8a376af926a 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -52,7 +52,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
return 0;
spin_lock_irq(get_ccwdev_lock(device->cdev));
seq_printf(m, "%d\t", (int) n);
- seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id);
+ seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
seq_printf(m, "%04X/", device->cdev->id.cu_type);
seq_printf(m, "%02X\t", device->cdev->id.cu_model);
seq_printf(m, "%04X/", device->cdev->id.dev_type);
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index cc8fd781ee2..5bd573d144d 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -47,7 +47,7 @@ tape_std_assign_timeout(unsigned long data)
rc = tape_cancel_io(device, request);
if(rc)
PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
- device->cdev->dev.bus_id, rc);
+ dev_name(&device->cdev->dev), rc);
}
@@ -83,7 +83,7 @@ tape_std_assign(struct tape_device *device)
if (rc != 0) {
PRINT_WARN("%s: assign failed - device might be busy\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
device->cdev_id);
} else {
@@ -106,7 +106,7 @@ tape_std_unassign (struct tape_device *device)
DBF_EVENT(3, "(%08x): Can't unassign device\n",
device->cdev_id);
PRINT_WARN("(%s): Can't unassign device - device gone\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
return -EIO;
}
@@ -120,7 +120,8 @@ tape_std_unassign (struct tape_device *device)
if ((rc = tape_do_io(device, request)) != 0) {
DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
- PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id);
+ PRINT_WARN("%s: Unassign failed\n",
+ dev_name(&device->cdev->dev));
} else {
DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
}
@@ -634,10 +635,10 @@ tape_std_mtcompression(struct tape_device *device, int mt_count)
DBF_EXCEPTION(6, "xcom parm\n");
if (*device->modeset_byte & 0x08)
PRINT_INFO("(%s) Compression is currently on\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
else
PRINT_INFO("(%s) Compression is currently off\n",
- device->cdev->dev.bus_id);
+ dev_name(&device->cdev->dev));
PRINT_INFO("Use 1 to switch compression on, 0 to "
"switch it off\n");
return -EINVAL;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c31faefa2b3..42173cc3461 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -724,8 +724,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (dev) {
- snprintf(dev->bus_id, BUS_ID_SIZE, "%s",
- priv->internal_name);
+ dev_set_name(dev, priv->internal_name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
@@ -751,7 +750,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
priv->class_device = device_create_drvdata(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
- priv, "%s", dev->bus_id);
+ priv, "%s", dev_name(dev));
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index c1f352b8486..6fdfa5ddeca 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -886,11 +886,11 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
if (urd->class == DEV_CLASS_UR_I)
- sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
+ sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
if (urd->class == DEV_CLASS_UR_O)
- sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
+ sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
- sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
+ sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
} else {
rc = -EOPNOTSUPP;
goto fail_free_cdev;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 0bfcbbe375c..2f547b840ef 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -24,6 +24,7 @@
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
+#include "device.h"
/*
* "Blacklisting" of certain devices:
@@ -191,9 +192,9 @@ static int blacklist_parse_parameters(char *str, range_action action,
rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
msgtrigger);
if (rc)
- totalrc = 1;
+ totalrc = -EINVAL;
} else
- totalrc = 1;
+ totalrc = -EINVAL;
}
return totalrc;
@@ -240,8 +241,10 @@ static int blacklist_parse_proc_parameters(char *buf)
rc = blacklist_parse_parameters(buf, free, 0);
else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
+ else if (strcmp("purge", parm) == 0)
+ return ccw_purge_blacklisted();
else
- return 1;
+ return -EINVAL;
css_schedule_reprobe();
@@ -353,7 +356,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
}
ret = blacklist_parse_proc_parameters(buf);
if (ret)
- rc = -EINVAL;
+ rc = ret;
else
rc = user_len;
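A sketch of the revised keyword handling, assuming a simplified dispatcher (the real blacklist_parse_proc_parameters() above also parses device ranges and triggers a reprobe): unknown input now yields -EINVAL, which cio_ignore_write() hands straight back to the writer, and the new "purge" keyword maps to ccw_purge_blacklisted().

#include <linux/errno.h>
#include <linux/string.h>

/* illustrative mirror of the proc keyword dispatch */
static int parse_cio_ignore_keyword(const char *parm)
{
	if (strcmp(parm, "free") == 0 || strcmp(parm, "add") == 0)
		return 0;		/* ranges parsed elsewhere */
	if (strcmp(parm, "purge") == 0)
		return 0;		/* would call ccw_purge_blacklisted() */
	return -EINVAL;			/* propagated to user space */
}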
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e0ce65fca4e..3ac2c2019f5 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -113,7 +113,8 @@ ccwgroup_release (struct device *dev)
for (i = 0; i < gdev->count; i++) {
if (gdev->cdev[i]) {
- dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
+ dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
put_device(&gdev->cdev[i]->dev);
}
}
@@ -268,8 +269,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
goto error;
}
- snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
- gdev->cdev[0]->dev.bus_id);
+ dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
rc = device_add(&gdev->dev);
if (rc)
@@ -296,6 +296,7 @@ error:
if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
put_device(&gdev->cdev[i]->dev);
+ gdev->cdev[i] = NULL;
}
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
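The release path now only clears a slave's driver data if it still points at this group device, and the error path NULLs the cdev slots it has already dropped. The guard, sketched with a generic owner pointer (ungroup_slave() is illustrative, not a function in ccwgroup.c):

#include <linux/device.h>

/* illustrative helper: detach one slave device from its group owner */
static void ungroup_slave(struct device *slave, void *owner)
{
	/* someone else may have re-claimed drvdata in the meantime */
	if (dev_get_drvdata(slave) == owner)
		dev_set_drvdata(slave, NULL);
	put_device(slave);
}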
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index f1216cf6fa8..1246f61a533 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -393,8 +393,7 @@ int chp_new(struct chp_id chpid)
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.release = chp_release;
- snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid,
- chpid.id);
+ dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* Obtain channel path description and fill it in. */
ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 91ca87aa9f9..f49f0e502b8 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -261,7 +261,7 @@ static int chsc_examine_irb(struct chsc_request *request)
{
int backed_up;
- if (!scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND)
+ if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
return -EIO;
backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5954b905e3c..3db2c386546 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -114,6 +114,7 @@ cio_tpi(void)
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
+ int irq_context;
tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
if (tpi (NULL) != 1)
@@ -126,7 +127,9 @@ cio_tpi(void)
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (!sch)
return 1;
- local_bh_disable();
+ irq_context = in_interrupt();
+ if (!irq_context)
+ local_bh_disable();
irq_enter ();
spin_lock(sch->lock);
memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
@@ -134,7 +137,8 @@ cio_tpi(void)
sch->driver->irq(sch);
spin_unlock(sch->lock);
irq_exit ();
- _local_bh_enable();
+ if (!irq_context)
+ _local_bh_enable();
return 1;
}
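cio_tpi() can now be reached from interrupt context, where bottom halves must not be disabled and re-enabled. A sketch of the pattern with a generic callback (run_with_irq_accounting() is illustrative only): sample the context once and guard both calls with the same flag.

#include <linux/interrupt.h>
#include <linux/hardirq.h>

/* illustrative only: not part of cio.c */
static void run_with_irq_accounting(void (*fn)(void *), void *data)
{
	int irq_context = in_interrupt();

	if (!irq_context)
		local_bh_disable();
	irq_enter();
	fn(data);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
}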
@@ -153,7 +157,7 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
"subchannel 0.%x.%04x!\n", sch->schid.ssid,
sch->schid.sch_no);
- sprintf(dbf_text, "no%s", sch->dev.bus_id);
+ sprintf(dbf_text, "no%s", dev_name(&sch->dev));
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -171,9 +175,10 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
union orb *orb;
CIO_TRACE_EVENT(4, "stIO");
- CIO_TRACE_EVENT(4, sch->dev.bus_id);
+ CIO_TRACE_EVENT(4, dev_name(&sch->dev));
orb = &to_io_private(sch)->orb;
+ memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
@@ -231,7 +236,7 @@ cio_resume (struct subchannel *sch)
int ccode;
CIO_TRACE_EVENT (4, "resIO");
- CIO_TRACE_EVENT (4, sch->dev.bus_id);
+ CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
@@ -268,7 +273,7 @@ cio_halt(struct subchannel *sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "haltIO");
- CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
* Issue "Halt subchannel" and process condition code
@@ -303,7 +308,7 @@ cio_clear(struct subchannel *sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "clearIO");
- CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
* Issue "Clear subchannel" and process condition code
@@ -339,7 +344,7 @@ cio_cancel (struct subchannel *sch)
return -ENODEV;
CIO_TRACE_EVENT (2, "cancelIO");
- CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
@@ -403,7 +408,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
int ret;
CIO_TRACE_EVENT (2, "ensch");
- CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
return -EINVAL;
@@ -453,7 +458,7 @@ int cio_disable_subchannel(struct subchannel *sch)
int ret;
CIO_TRACE_EVENT (2, "dissch");
- CIO_TRACE_EVENT (2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
return 0;
@@ -570,8 +575,10 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
}
mutex_init(&sch->reg_mutex);
/* Set a name for the subchannel */
- snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
- schid.sch_no);
+ if (cio_is_console(schid))
+ sch->dev.init_name = cio_get_console_sch_name(schid);
+ else
+ dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
@@ -676,6 +683,7 @@ do_IRQ (struct pt_regs *regs)
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
+static char console_sch_name[10] = "0.x.xxxx";
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
@@ -826,6 +834,12 @@ cio_get_console_subchannel(void)
return &console_subchannel;
}
+const char *cio_get_console_sch_name(struct subchannel_id schid)
+{
+ snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
+ return (const char *)console_sch_name;
+}
+
#endif
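Subchannel names now go through the driver core: the console subchannel, which comes up before dynamic allocation is available, keeps a static buffer and hands it over via dev.init_name, while regular subchannels use dev_set_name(). A sketch of that split, assuming a plain struct device and a caller-provided static buffer (name_subchannel() is illustrative only):

#include <linux/device.h>
#include <linux/kernel.h>

/* illustrative only: not a function in cio.c */
static void name_subchannel(struct device *dev, int is_console,
			    char *static_buf, size_t len,
			    int ssid, int sch_no)
{
	if (is_console) {
		/* too early for a kmalloc-backed name */
		snprintf(static_buf, len, "0.%x.%04x", ssid, sch_no);
		dev->init_name = static_buf;
	} else {
		dev_set_name(dev, "0.%x.%04x", ssid, sch_no);
	}
}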
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
@@ -845,19 +859,6 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
return -EBUSY; /* uhm... */
}
-/* we can't use the normal udelay here, since it enables external interrupts */
-
-static void udelay_reset(unsigned long usecs)
-{
- uint64_t start_cc, end_cc;
-
- asm volatile ("STCK %0" : "=m" (start_cc));
- do {
- cpu_relax();
- asm volatile ("STCK %0" : "=m" (end_cc));
- } while (((end_cc - start_cc)/4096) < usecs);
-}
-
static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
@@ -873,7 +874,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
if (schid_equal(&ti.schid, &schid))
return 0;
}
- udelay_reset(100);
+ udelay_simple(100);
}
return -EBUSY;
}
@@ -881,7 +882,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
static void __clear_chsc_subchannel_easy(void)
{
/* It seems we can only wait for a bit here :/ */
- udelay_reset(100);
+ udelay_simple(100);
}
static int pgm_check_occured;
@@ -891,7 +892,7 @@ static void cio_reset_pgm_check_handler(void)
pgm_check_occured = 1;
}
-static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
+static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
int rc;
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 3b236d20e83..0fb24784e92 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -117,11 +117,15 @@ extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
extern spinlock_t * cio_get_console_lock(void);
extern void *cio_get_console_priv(void);
+extern const char *cio_get_console_sch_name(struct subchannel_id schid);
+extern const char *cio_get_console_cdev_name(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#define cio_get_console_lock() NULL
#define cio_get_console_priv() NULL
+#define cio_get_console_sch_name(schid) NULL
+#define cio_get_console_cdev_name(sch) NULL
#endif
#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1261e1a9e8c..76bbb1e74c2 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -698,7 +698,7 @@ static int __init setup_css(int nr)
return -ENOMEM;
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
- sprintf(css->pseudo_subchannel->dev.bus_id, "defunct");
+ dev_set_name(&css->pseudo_subchannel->dev, "defunct");
ret = cio_create_sch_lock(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
@@ -707,7 +707,7 @@ static int __init setup_css(int nr)
mutex_init(&css->mutex);
css->valid = 1;
css->cssid = nr;
- sprintf(css->device.bus_id, "css%x", nr);
+ dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
tod_high = (u32) (get_clock() >> 32);
css_generate_pgid(css, tod_high);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 28221030b88..4e78c82194b 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -31,6 +31,7 @@
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
+#include "blacklist.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
@@ -296,36 +297,33 @@ static void ccw_device_unregister(struct ccw_device *cdev)
device_del(&cdev->dev);
}
-static void ccw_device_remove_orphan_cb(struct device *dev)
+static void ccw_device_remove_orphan_cb(struct work_struct *work)
{
- struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_private *priv;
+ struct ccw_device *cdev;
+ priv = container_of(work, struct ccw_device_private, kick_work);
+ cdev = priv->cdev;
ccw_device_unregister(cdev);
put_device(&cdev->dev);
+ /* Release cdev reference for workqueue processing. */
+ put_device(&cdev->dev);
}
-static void ccw_device_remove_sch_cb(struct device *dev)
-{
- struct subchannel *sch;
-
- sch = to_subchannel(dev);
- css_sch_device_unregister(sch);
- /* Reset intparm to zeroes. */
- sch->schib.pmcw.intparm = 0;
- cio_modify(sch);
- put_device(&sch->dev);
-}
+static void ccw_device_call_sch_unregister(struct work_struct *work);
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
unsigned long flags;
- int rc;
/*
* Forced offline in disconnected state means
* 'throw away device'.
*/
+ /* Get cdev reference for workqueue processing. */
+ if (!get_device(&cdev->dev))
+ return;
if (ccw_device_is_orphan(cdev)) {
/*
* Deregister ccw device.
@@ -335,23 +333,13 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irqrestore(cdev->ccwlock, flags);
- rc = device_schedule_callback(&cdev->dev,
- ccw_device_remove_orphan_cb);
- if (rc)
- CIO_MSG_EVENT(0, "Couldn't unregister orphan "
- "0.%x.%04x\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
- return;
- }
- /* Deregister subchannel, which will kill the ccw device. */
- rc = device_schedule_callback(cdev->dev.parent,
- ccw_device_remove_sch_cb);
- if (rc)
- CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
- "0.%x.%04x\n",
- cdev->private->dev_id.ssid,
- cdev->private->dev_id.devno);
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_remove_orphan_cb);
+ } else
+ /* Deregister subchannel, which will kill the ccw device. */
+ PREPARE_WORK(&cdev->private->kick_work,
+ ccw_device_call_sch_unregister);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
}
/**
@@ -970,12 +958,17 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
priv = container_of(work, struct ccw_device_private, kick_work);
cdev = priv->cdev;
+ /* Get subchannel reference for local processing. */
+ if (!get_device(cdev->dev.parent))
+ return;
sch = to_subchannel(cdev->dev.parent);
css_sch_device_unregister(sch);
/* Reset intparm to zeroes. */
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
+ /* Release cdev reference for workqueue processing.*/
put_device(&cdev->dev);
+ /* Release subchannel reference for local processing. */
put_device(&sch->dev);
}
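Deregistration moves from device_schedule_callback() to the driver's own slow-path workqueue: the caller pins the ccw device before queueing, and the worker drops that reference when it is done. A sketch of the queueing side, assuming the driver-internal kick_work/slow_path_wq pair shown above (queue_deferred_removal() is illustrative only):

#include <linux/workqueue.h>
#include <linux/device.h>

/* illustrative helper: defer fn to slow_path_wq with the device pinned */
static int queue_deferred_removal(struct ccw_device *cdev, work_func_t fn)
{
	if (!get_device(&cdev->dev))
		return -ENODEV;		/* device already on its way out */
	PREPARE_WORK(&cdev->private->kick_work, fn);
	queue_work(slow_path_wq, &cdev->private->kick_work);
	return 0;			/* fn must drop the reference */
}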
@@ -1001,6 +994,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister);
queue_work(slow_path_wq, &cdev->private->kick_work);
+ /* Release subchannel reference for asynchronous recognition. */
+ put_device(&sch->dev);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
@@ -1040,8 +1035,11 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
init_timer(&priv->timer);
/* Set an initial name for the device. */
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
- sch->schid.ssid, sch->schib.pmcw.dev);
+ if (cio_is_console(sch->schid))
+ cdev->dev.init_name = cio_get_console_cdev_name(sch);
+ else
+ dev_set_name(&cdev->dev, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@@ -1106,7 +1104,7 @@ static void io_subchannel_irq(struct subchannel *sch)
cdev = sch_get_cdev(sch);
CIO_TRACE_EVENT(3, "IRQ");
- CIO_TRACE_EVENT(3, sch->dev.bus_id);
+ CIO_TRACE_EVENT(3, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
@@ -1476,6 +1474,45 @@ static void ccw_device_schedule_recovery(void)
spin_unlock_irqrestore(&recovery_lock, flags);
}
+static int purge_fn(struct device *dev, void *data)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct ccw_device_private *priv = cdev->private;
+ int unreg;
+
+ spin_lock_irq(cdev->ccwlock);
+ unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
+ (priv->state == DEV_STATE_OFFLINE);
+ spin_unlock_irq(cdev->ccwlock);
+ if (!unreg)
+ goto out;
+ if (!get_device(&cdev->dev))
+ goto out;
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
+ priv->dev_id.devno);
+ PREPARE_WORK(&cdev->private->kick_work, ccw_device_call_sch_unregister);
+ queue_work(slow_path_wq, &cdev->private->kick_work);
+
+out:
+ /* Abort loop in case of pending signal. */
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 0;
+}
+
+/**
+ * ccw_purge_blacklisted - purge unused, blacklisted devices
+ *
+ * Unregister all ccw devices that are offline and on the blacklist.
+ */
+int ccw_purge_blacklisted(void)
+{
+ CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
+ bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+ return 0;
+}
+
static void device_set_disconnected(struct ccw_device *cdev)
{
if (!cdev)
@@ -1492,7 +1529,7 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
struct subchannel *sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT(2, "notoper");
- CIO_TRACE_EVENT(2, sch->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccw_device_set_timeout(cdev, 0);
cio_disable_subchannel(sch);
cdev->private->state = DEV_STATE_NOT_OPER;
@@ -1591,6 +1628,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
+static char console_cdev_name[10] = "0.x.xxxx";
static struct ccw_device_private console_private;
static int console_cdev_in_use;
@@ -1661,6 +1699,14 @@ ccw_device_probe_console(void)
console_cdev.online = 1;
return &console_cdev;
}
+
+
+const char *cio_get_console_cdev_name(struct subchannel *sch)
+{
+ snprintf(console_cdev_name, 10, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
+ return (const char *)console_cdev_name;
+}
#endif
/*
@@ -1673,7 +1719,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
bus_id = id;
- return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
+ return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0);
}
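The new ccw_purge_blacklisted() walks the ccw bus and schedules the usual deferred unregistration for every offline, blacklisted device; returning -EINTR from the per-device callback aborts the walk when a signal is pending. The shape of that iteration, sketched with a hypothetical predicate in place of the blacklist test:

#include <linux/device.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* hypothetical stand-in for the "offline and blacklisted" test */
static int is_unwanted(struct device *dev)
{
	return 0;
}

static int purge_one(struct device *dev, void *data)
{
	if (is_unwanted(dev))
		dev_warn(dev, "scheduling deferred unregistration\n");
	/* a pending signal makes bus_for_each_dev() stop early */
	return signal_pending(current) ? -EINTR : 0;
}

static void purge_bus(struct bus_type *bus)
{
	bus_for_each_dev(bus, NULL, NULL, purge_one);
}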
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 6f5c3f2b358..104ed669db4 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -86,6 +86,7 @@ int ccw_device_is_orphan(struct ccw_device *);
int ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
+int ccw_purge_blacklisted(void);
/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 84cc9ea346d..10bc03940fb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -52,8 +52,10 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
- printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
- printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
+ printk(KERN_WARNING "cio: ccw device bus id: %s\n",
+ dev_name(&cdev->dev));
+ printk(KERN_WARNING "cio: subchannel bus id: %s\n",
+ dev_name(&sch->dev));
printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
"vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ee1a28310fb..eabcc42d63d 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -498,7 +498,7 @@ ccw_device_stlck(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT(2, "stl lock");
- CIO_TRACE_EVENT(2, cdev->dev.bus_id);
+ CIO_TRACE_EVENT(2, dev_name(&cdev->dev));
buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
if (!buf)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 3f8f1cf69c7..c4f3e7c9a85 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -123,7 +123,7 @@ struct ccw_device_private {
void *cmb_wait; /* deferred cmb enable/disable */
};
-static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
+static inline int ssch(struct subchannel_id schid, union orb *addr)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode = -EIO;
@@ -134,7 +134,9 @@ static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
" srl %0,28\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
return ccode;
}
@@ -147,7 +149,9 @@ static inline int rsch(struct subchannel_id schid)
" rsch\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc", "memory");
return ccode;
}
@@ -160,7 +164,9 @@ static inline int csch(struct subchannel_id schid)
" csch\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
return ccode;
}
@@ -173,7 +179,9 @@ static inline int hsch(struct subchannel_id schid)
" hsch\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
return ccode;
}
@@ -186,7 +194,9 @@ static inline int xsch(struct subchannel_id schid)
" .insn rre,0xb2760000,%1,0\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1) : "cc");
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
return ccode;
}
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 9fa2ac13ac8..75926279263 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -23,38 +23,39 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(struct subchannel_id schid,
- volatile struct schib *addr)
+static inline int stsch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
asm volatile(
- " stsch 0(%2)\n"
+ " stsch 0(%3)\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "=d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
return ccode;
}
-static inline int stsch_err(struct subchannel_id schid,
- volatile struct schib *addr)
+static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
asm volatile(
- " stsch 0(%2)\n"
+ " stsch 0(%3)\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "+d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
return ccode;
}
-static inline int msch(struct subchannel_id schid,
- volatile struct schib *addr)
+static inline int msch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
@@ -63,12 +64,13 @@ static inline int msch(struct subchannel_id schid,
" msch 0(%2)\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "=d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc");
return ccode;
}
-static inline int msch_err(struct subchannel_id schid,
- volatile struct schib *addr)
+static inline int msch_err(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
@@ -79,33 +81,38 @@ static inline int msch_err(struct subchannel_id schid,
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc");
return ccode;
}
-static inline int tsch(struct subchannel_id schid,
- volatile struct irb *addr)
+static inline int tsch(struct subchannel_id schid, struct irb *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
asm volatile(
- " tsch 0(%2)\n"
+ " tsch 0(%3)\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+ : "=d" (ccode), "=m" (*addr)
+ : "d" (reg1), "a" (addr)
+ : "cc");
return ccode;
}
-static inline int tpi( volatile struct tpi_info *addr)
+static inline int tpi(struct tpi_info *addr)
{
int ccode;
asm volatile(
- " tpi 0(%1)\n"
+ " tpi 0(%2)\n"
" ipm %0\n"
" srl %0,28"
- : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc");
+ : "=d" (ccode), "=m" (*addr)
+ : "a" (addr)
+ : "cc");
return ccode;
}
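The inline assembly wrappers now tell the compiler what they actually touch: structures the instruction stores into move from an "m" input to an "=m" output, and helpers whose effects are not visible through their operands gain a "memory" clobber, so cached values are not reused across the call. Schematically only (the empty template stands in for a real channel instruction, and the register constraints are s390 ones):

static inline int example_store(int *addr)
{
	int cc = 0;

	asm volatile(
		""	/* placeholder for an instruction writing 0(%2) */
		: "+d" (cc), "=m" (*addr)	/* cc and *addr are outputs */
		: "a" (addr)			/* address register input */
		: "cc");			/* condition code clobbered */
	return cc;
}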
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c1a70985abf..e3ea1d5f281 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -16,6 +16,14 @@
#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
+/*
+ * If an asynchronous HiperSockets queue runs full, waiting up to 10 seconds
+ * for the timer to give transmitted skbs back to the stack is too long.
+ * Therefore polling is started once the multicast queue is filled to more
+ * than 50 percent.
+ */
+#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
+
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
QDIO_IRQ_STATE_ESTABLISHED,
@@ -195,6 +203,9 @@ struct qdio_output_q {
/* PCIs are enabled for the queue */
int pci_out_enabled;
+ /* IQDIO: output multiple buffers (enhanced SIGA) */
+ int use_enh_siga;
+
/* timer to check for more outbound work */
struct timer_list timer;
};
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 337aa3087a7..b5390821434 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -155,7 +155,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
{
memset(name, 0, sizeof(name));
- sprintf(name, "%s", cdev->dev.bus_id);
+ sprintf(name, "%s", dev_name(&cdev->dev));
if (q->is_input_q)
sprintf(name + strlen(name), "_input");
else
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e6eabc85342..a50682d2a0f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -316,6 +316,9 @@ static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
unsigned int fc = 0;
unsigned long schid;
+ if (q->u.out.use_enh_siga) {
+ fc = 3;
+ }
if (!is_qebsm(q))
schid = *((u32 *)&q->irq_ptr->schid);
else {
@@ -851,6 +854,12 @@ static void __qdio_outbound_processing(struct qdio_q *q)
if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
return;
+ if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
+ (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) {
+ tasklet_schedule(&q->tasklet);
+ return;
+ }
+
if (q->u.out.pci_out_enabled)
return;
@@ -956,7 +965,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
char dbf_text[15];
QDIO_DBF_TEXT2(1, trace, "ick2");
- sprintf(dbf_text, "%s", cdev->dev.bus_id);
+ sprintf(dbf_text, "%s", dev_name(&cdev->dev));
QDIO_DBF_TEXT2(1, trace, dbf_text);
QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int));
QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
@@ -1443,6 +1452,8 @@ int qdio_establish(struct qdio_initialize *init_data)
}
qdio_setup_ssqd_info(irq_ptr);
+ sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac);
QDIO_DBF_TEXT2(0, setup, dbf_text);
@@ -1615,12 +1626,21 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags,
if (multicast_outbound(q))
qdio_kick_outbound_q(q);
else
- /*
- * One siga-w per buffer required for unicast
- * HiperSockets.
- */
- while (count--)
+ if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
+ (count > 1) &&
+ (count <= q->irq_ptr->ssqd_desc.mmwc)) {
+ /* exploit enhanced SIGA */
+ q->u.out.use_enh_siga = 1;
qdio_kick_outbound_q(q);
+ } else {
+ /*
+ * One siga-w per buffer required for unicast
+ * HiperSockets.
+ */
+ q->u.out.use_enh_siga = 0;
+ while (count--)
+ qdio_kick_outbound_q(q);
+ }
goto out;
}
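For unicast HiperSockets the old rule was one SIGA-w per buffer; with multiple-write-command support (ssqd_desc.mmwc > 1) a single enhanced SIGA now covers a burst of up to mmwc buffers. The decision reduced to a sketch (count_sigas() is illustrative, not a function in qdio_main.c):

/* illustrative only: how many SIGA instructions a burst of `count'
 * unicast HiperSockets buffers needs */
static int count_sigas(int count, int mmwc)
{
	if (mmwc > 1 && count > 1 && count <= mmwc)
		return 1;	/* enhanced SIGA: one instruction, fc = 3 */
	return count;		/* classic path: one SIGA-w per buffer */
}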
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 1679e2f91c9..a0b6b46e746 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
{
char s[80];
- sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no);
-
+ sprintf(s, "qdio: %s ", dev_name(&cdev->dev));
switch (irq_ptr->qib.qfmt) {
case QDIO_QETH_QFMT:
- sprintf(s + strlen(s), "OSADE ");
+ sprintf(s + strlen(s), "OSA ");
break;
case QDIO_ZFCP_QFMT:
sprintf(s + strlen(s), "ZFCP ");
break;
case QDIO_IQDIO_QFMT:
- sprintf(s + strlen(s), "HiperSockets ");
+ sprintf(s + strlen(s), "HS ");
break;
}
- sprintf(s + strlen(s), "using: ");
-
- if (!is_thinint_irq(irq_ptr))
- sprintf(s + strlen(s), "no");
- sprintf(s + strlen(s), "AdapterInterrupts ");
- if (!(irq_ptr->sch_token != 0))
- sprintf(s + strlen(s), "no");
- sprintf(s + strlen(s), "QEBSM ");
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
- sprintf(s + strlen(s), "no");
- sprintf(s + strlen(s), "OutboundPCI ");
- if (!css_general_characteristics.aif_tdd)
- sprintf(s + strlen(s), "no");
- sprintf(s + strlen(s), "TDD\n");
- printk(KERN_INFO "qdio: %s", s);
-
- memset(s, 0, sizeof(s));
- sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
- if (irq_ptr->siga_flag.input)
- sprintf(s + strlen(s), "Read ");
- if (irq_ptr->siga_flag.output)
- sprintf(s + strlen(s), "Write ");
- if (irq_ptr->siga_flag.sync)
- sprintf(s + strlen(s), "Sync ");
- if (!irq_ptr->siga_flag.no_sync_ti)
- sprintf(s + strlen(s), "SyncAI ");
- if (!irq_ptr->siga_flag.no_sync_out_ti)
- sprintf(s + strlen(s), "SyncOutAI ");
- if (!irq_ptr->siga_flag.no_sync_out_pci)
- sprintf(s + strlen(s), "SyncOutPCI");
+ sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no);
+ sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr));
+ sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0);
+ sprintf(s + strlen(s), "PCI:%d ",
+ (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0);
+ sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd);
+ sprintf(s + strlen(s), "SIGA:");
+ sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " ");
+ sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " ");
+ sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " ");
+ sprintf(s + strlen(s), "%s",
+ (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ");
+ sprintf(s + strlen(s), "%s",
+ (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ");
+ sprintf(s + strlen(s), "%s",
+ (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
sprintf(s + strlen(s), "\n");
- printk(KERN_INFO "qdio: %s", s);
+ printk(KERN_INFO "%s", s);
}
int __init qdio_setup_init(void)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 62b6b55230d..326db1e827c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -892,8 +892,8 @@ static void ap_scan_bus(struct work_struct *unused)
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
- snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
- AP_QID_DEVICE(ap_dev->qid));
+ dev_set_name(&ap_dev->device, "card%02x",
+ AP_QID_DEVICE(ap_dev->qid));
ap_dev->device.release = ap_device_release;
rc = device_register(&ap_dev->device);
if (rc) {
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 292b60da6dc..ff4a6931bb8 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -24,6 +24,7 @@
#include <asm/kvm_virtio.h>
#include <asm/setup.h>
#include <asm/s390_ext.h>
+#include <asm/s390_rdev.h>
#define VIRTIO_SUBCODE_64 0x0D00
@@ -241,10 +242,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
* The root device for the kvm virtio devices.
* This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
*/
-static struct device kvm_root = {
- .parent = NULL,
- .bus_id = "kvm_s390",
-};
+static struct device *kvm_root;
/*
* adds a new device and register it with virtio
@@ -261,7 +259,7 @@ static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
return;
}
- kdev->vdev.dev.parent = &kvm_root;
+ kdev->vdev.dev.parent = kvm_root;
kdev->vdev.id.device = d->type;
kdev->vdev.config = &kvm_vq_configspace_ops;
kdev->desc = d;
@@ -317,15 +315,16 @@ static int __init kvm_devices_init(void)
if (!MACHINE_IS_KVM)
return -ENODEV;
- rc = device_register(&kvm_root);
- if (rc) {
+ kvm_root = s390_root_dev_register("kvm_s390");
+ if (IS_ERR(kvm_root)) {
+ rc = PTR_ERR(kvm_root);
printk(KERN_ERR "Could not register kvm_s390 root device");
return rc;
}
rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
if (rc) {
- device_unregister(&kvm_root);
+ s390_root_dev_unregister(kvm_root);
return rc;
}
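kvm_virtio now registers its root device through the common s390 helper instead of a static struct device. Since s390_root_dev_register() returns an ERR_PTR() value on failure, the result must be checked with IS_ERR() before use. A minimal sketch of that idiom, assuming an arbitrary ERR_PTR-returning registration helper (register_root() is illustrative only):

#include <linux/err.h>
#include <linux/device.h>

/* illustrative wrapper around an ERR_PTR-returning registration call */
static int register_root(struct device **root,
			 struct device *(*reg)(const char *), const char *name)
{
	struct device *dev = reg(name);

	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* e.g. -ENOMEM */
	*root = dev;
	return 0;
}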
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index e10ac9ab2d4..8f83fc994f5 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -299,7 +299,7 @@ claw_probe(struct ccwgroup_device *cgdev)
probe_error(cgdev);
put_device(&cgdev->dev);
printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n",
- cgdev->cdev[0]->dev.bus_id,__func__,__LINE__);
+ dev_name(&cgdev->cdev[0]->dev), __func__, __LINE__);
CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
return rc;
}
@@ -584,7 +584,7 @@ claw_irq_handler(struct ccw_device *cdev,
if (!cdev->dev.driver_data) {
printk(KERN_WARNING "claw: unsolicited interrupt for device:"
"%s received c-%02x d-%02x\n",
- cdev->dev.bus_id, irb->scsw.cmd.cstat,
+ dev_name(&cdev->dev), irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
CLAW_DBF_TEXT(2, trace, "badirq");
return;
@@ -598,7 +598,7 @@ claw_irq_handler(struct ccw_device *cdev,
p_ch = &privptr->channel[WRITE];
else {
printk(KERN_WARNING "claw: Can't determine channel for "
- "interrupt, device %s\n", cdev->dev.bus_id);
+ "interrupt, device %s\n", dev_name(&cdev->dev));
CLAW_DBF_TEXT(2, trace, "badchan");
return;
}
@@ -662,7 +662,7 @@ claw_irq_handler(struct ccw_device *cdev,
printk(KERN_WARNING "claw: unsolicited "
"interrupt for device:"
"%s received c-%02x d-%02x\n",
- cdev->dev.bus_id,
+ dev_name(&cdev->dev),
irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
return;
@@ -1136,19 +1136,20 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
break;
case -ENODEV:
printk(KERN_EMERG "%s: Missing device called "
- "for IO ENODEV\n", cdev->dev.bus_id);
+ "for IO ENODEV\n", dev_name(&cdev->dev));
break;
case -EIO:
printk(KERN_EMERG "%s: Status pending... EIO \n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
break;
case -EINVAL:
printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n",
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
break;
default:
printk(KERN_EMERG "%s: Unknown error in "
- "Do_IO %d\n",cdev->dev.bus_id, return_code);
+ "Do_IO %d\n", dev_name(&cdev->dev),
+ return_code);
}
}
CLAW_DBF_TEXT(4, trace, "ccwret");
@@ -2848,11 +2849,11 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
struct chbk *p_ch;
struct ccw_dev_id dev_id;
- CLAW_DBF_TEXT_(2, setup, "%s", cdev->dev.bus_id);
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
p_ch = &privptr->channel[i];
p_ch->cdev = cdev;
- snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
+ snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
ccw_device_get_id(cdev, &dev_id);
p_ch->devno = dev_id.devno;
if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
@@ -2879,7 +2880,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
int ret;
struct ccw_dev_id dev_id;
- printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id);
+ printk(KERN_INFO "claw: add for %s\n",
+ dev_name(&cgdev->cdev[READ]->dev));
CLAW_DBF_TEXT(2, setup, "new_dev");
privptr = cgdev->dev.driver_data;
cgdev->cdev[READ]->dev.driver_data = privptr;
@@ -2903,14 +2905,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
if (ret != 0) {
printk(KERN_WARNING
"claw: ccw_device_set_online %s READ failed "
- "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret);
+ "with ret = %d\n", dev_name(&cgdev->cdev[READ]->dev),
+ ret);
goto out;
}
ret = ccw_device_set_online(cgdev->cdev[WRITE]);
if (ret != 0) {
printk(KERN_WARNING
"claw: ccw_device_set_online %s WRITE failed "
- "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret);
+ "with ret = %d\n", dev_name(&cgdev->cdev[WRITE]->dev)
+ ret);
goto out;
}
dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
@@ -2986,7 +2990,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
struct net_device *ndev;
int ret;
- CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id);
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
priv = cgdev->dev.driver_data;
if (!priv)
return -ENODEV;
@@ -3016,11 +3020,11 @@ claw_remove_device(struct ccwgroup_device *cgdev)
struct claw_privbk *priv;
BUG_ON(!cgdev);
- CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id);
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
priv = cgdev->dev.driver_data;
BUG_ON(!priv);
printk(KERN_INFO "claw: %s() called %s will be removed.\n",
- __func__,cgdev->cdev[0]->dev.bus_id);
+ __func__, dev_name(&cgdev->cdev[0]->dev));
if (cgdev->state == CCWGROUP_ONLINE)
claw_shutdown_device(cgdev);
claw_remove_files(&cgdev->dev);
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1a89d989f34..005072c420d 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -85,7 +85,7 @@
#define CLAW_MAX_DEV 256 /* max claw devices */
#define MAX_NAME_LEN 8 /* host name, adapter name length */
#define CLAW_FRAME_SIZE 4096
-#define CLAW_ID_SIZE BUS_ID_SIZE+3
+#define CLAW_ID_SIZE 20+3
/* state machine codes used in claw_irq_handler */
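With bus_id on its way out, the channel id buffers can no longer be sized from BUS_ID_SIZE; 20 bytes cover the longest ccw bus id, plus 3 for the driver prefix and terminator. A sketch of how the buffer is filled, mirroring the snprintf() calls in add_channel() above (build_channel_id() and EXAMPLE_ID_SIZE are illustrative names):

#include <linux/device.h>
#include <linux/kernel.h>

#define EXAMPLE_ID_SIZE (20 + 3)	/* room for "cl-" + bus id + NUL */

/* illustrative only: snprintf() truncates safely if a longer name appears */
static void build_channel_id(char *id, const struct device *dev)
{
	snprintf(id, EXAMPLE_ID_SIZE, "cl-%s", dev_name(dev));
}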
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index b11fec24c7d..a4e29836a2a 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -277,18 +277,18 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
"irb error %ld on device %s\n",
- PTR_ERR(irb), cdev->dev.bus_id);
+ PTR_ERR(irb), dev_name(&cdev->dev));
switch (PTR_ERR(irb)) {
case -EIO:
- ctcm_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
+ ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev));
break;
case -ETIMEDOUT:
- ctcm_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
+ ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev));
break;
default:
ctcm_pr_warn("unknown error %ld on device %s\n",
- PTR_ERR(irb), cdev->dev.bus_id);
+ PTR_ERR(irb), dev_name(&cdev->dev));
}
return PTR_ERR(irb);
}
@@ -1182,7 +1182,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
int dstat;
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
- "Enter %s(%s)", CTCM_FUNTAIL, &cdev->dev.bus_id);
+ "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
if (ctcm_check_irb_error(cdev, irb))
return;
@@ -1208,14 +1208,14 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
ch = priv->channel[WRITE];
else {
ctcm_pr_err("ctcm: Can't determine channel for interrupt, "
- "device %s\n", cdev->dev.bus_id);
+ "device %s\n", dev_name(&cdev->dev));
return;
}
dev = ch->netdev;
if (dev == NULL) {
ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n",
- __func__, cdev->dev.bus_id, ch);
+ __func__, dev_name(&cdev->dev), ch);
return;
}
@@ -1329,7 +1329,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"%s(%s), type %d, proto %d",
- __func__, cdev->dev.bus_id, type, priv->protocol);
+ __func__, dev_name(&cdev->dev), type, priv->protocol);
ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (ch == NULL)
@@ -1358,7 +1358,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type,
goto nomem_return;
ch->cdev = cdev;
- snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", cdev->dev.bus_id);
+ snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
ch->type = type;
/**
@@ -1518,8 +1518,8 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
type = get_channel_type(&cdev0->id);
- snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cdev0->dev.bus_id);
- snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cdev1->dev.bus_id);
+ snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
+ snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
ret = add_channel(cdev0, type, priv);
if (ret)
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 8e10ee86a5e..d77cce3fe4d 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -104,7 +104,7 @@
#define READ 0
#define WRITE 1
-#define CTCM_ID_SIZE BUS_ID_SIZE+3
+#define CTCM_ID_SIZE 20+3
struct ctcm_profile {
unsigned long maxmulti;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 9bcfa04d863..0825be87e5a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -492,7 +492,7 @@ lcs_start_channel(struct lcs_channel *channel)
unsigned long flags;
int rc;
- LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
channel->ccws + channel->io_idx, 0, 0,
@@ -501,7 +501,8 @@ lcs_start_channel(struct lcs_channel *channel)
channel->state = LCS_CH_STATE_RUNNING;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
- LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4,trace,"essh%s",
+ dev_name(&channel->ccwdev->dev));
PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
}
return rc;
@@ -514,12 +515,13 @@ lcs_clear_channel(struct lcs_channel *channel)
int rc;
LCS_DBF_TEXT(4,trace,"clearch");
- LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
- LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace, "ecsc%s",
+ dev_name(&channel->ccwdev->dev));
return rc;
}
wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
@@ -540,13 +542,14 @@ lcs_stop_channel(struct lcs_channel *channel)
if (channel->state == LCS_CH_STATE_STOPPED)
return 0;
LCS_DBF_TEXT(4,trace,"haltsch");
- LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
channel->state = LCS_CH_STATE_INIT;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
- LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace, "ehsc%s",
+ dev_name(&channel->ccwdev->dev));
return rc;
}
/* Asynchronous halt initiated. Wait for its completion. */
@@ -632,10 +635,11 @@ __lcs_resume_channel(struct lcs_channel *channel)
return 0;
if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
return 0;
- LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
rc = ccw_device_resume(channel->ccwdev);
if (rc) {
- LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(4, trace, "ersc%s",
+ dev_name(&channel->ccwdev->dev));
PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc);
} else
channel->state = LCS_CH_STATE_RUNNING;
@@ -1302,18 +1306,18 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
switch (PTR_ERR(irb)) {
case -EIO:
- PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
+ PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
break;
case -ETIMEDOUT:
- PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
+ PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
break;
default:
PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
LCS_DBF_TEXT(2, trace, "ckirberr");
LCS_DBF_TEXT(2, trace, " rc???");
}
@@ -1390,7 +1394,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
- LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id);
+ LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
irb->scsw.cmd.dstat);
LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
@@ -1400,7 +1404,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
rc = lcs_get_problem(cdev, irb);
if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n",
- cdev->dev.bus_id, dstat, cstat);
+ dev_name(&cdev->dev), dstat, cstat);
if (rc) {
channel->state = LCS_CH_STATE_ERROR;
}
@@ -1463,7 +1467,7 @@ lcs_tasklet(unsigned long data)
int rc;
channel = (struct lcs_channel *) data;
- LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id);
+ LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
/* Check for processed buffers. */
iob = channel->iob;
@@ -2244,7 +2248,7 @@ lcs_recovery(void *ptr)
return 0;
LCS_DBF_TEXT(4, trace, "recover2");
gdev = card->gdev;
- PRINT_WARN("Recovery of device %s started...\n", gdev->dev.bus_id);
+ PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev));
rc = __lcs_shutdown_device(gdev, 1);
rc = lcs_new_device(gdev);
if (!rc)
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 9242b5acc66..0fea51e34b5 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1724,7 +1724,7 @@ static int netiucv_register_device(struct net_device *ndev)
IUCV_DBF_TEXT(trace, 3, __func__);
if (dev) {
- snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
+ dev_set_name(dev, "net%s", ndev->name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
/*
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bf8a75c92f2..af6d6045851 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -90,11 +90,11 @@ struct qeth_dbf_info {
#define CARD_RDEV(card) card->read.ccwdev
#define CARD_WDEV(card) card->write.ccwdev
#define CARD_DDEV(card) card->data.ccwdev
-#define CARD_BUS_ID(card) card->gdev->dev.bus_id
-#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id
-#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id
-#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id
-#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id
+#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
+#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
+#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
+#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
+#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
/**
* card stuff
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c7ab1b86451..141b2bb7b3c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -745,7 +745,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
QETH_DBF_TEXT(TRACE, 2, "CGENCHK");
PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ",
- cdev->dev.bus_id, dstat, cstat);
+ dev_name(&cdev->dev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
@@ -784,12 +784,12 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
switch (PTR_ERR(irb)) {
case -EIO:
- PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id);
+ PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev));
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO);
break;
case -ETIMEDOUT:
- PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
+ PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev));
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT);
if (intparm == QETH_RCD_PARM) {
@@ -803,7 +803,7 @@ static long __qeth_check_irb_error(struct ccw_device *cdev,
break;
default:
PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
- cdev->dev.bus_id);
+ dev_name(&cdev->dev));
QETH_DBF_TEXT(TRACE, 2, "ckirberr");
QETH_DBF_TEXT(TRACE, 2, " rc???");
}
@@ -4081,7 +4081,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (!get_device(dev))
return -ENODEV;
- QETH_DBF_TEXT_(SETUP, 2, "%s", gdev->dev.bus_id);
+ QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
card = qeth_alloc_card();
if (!card) {
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
index 3c7145d9f9a..64371c05a3b 100644
--- a/drivers/s390/s390_rdev.c
+++ b/drivers/s390/s390_rdev.c
@@ -30,7 +30,7 @@ s390_root_dev_register(const char *name)
dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
+ dev_set_name(dev, name);
dev->release = s390_root_dev_release;
ret = device_register(dev);
if (ret) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 90abfd06ed5..3b56220fb90 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -88,11 +88,13 @@ static int __init zfcp_device_setup(char *devstr)
strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn))
+ if (!token || strict_strtoull(token, 0,
+ (unsigned long long *) &zfcp_data.init_wwpn))
goto err_out;
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun))
+ if (!token || strict_strtoull(token, 0,
+ (unsigned long long *) &zfcp_data.init_fcp_lun))
goto err_out;
kfree(str);
@@ -100,24 +102,10 @@ static int __init zfcp_device_setup(char *devstr)
err_out:
kfree(str);
- pr_err("zfcp: Parse error for device parameter string %s, "
- "device not attached.\n", devstr);
+ pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
return 0;
}
-static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
-{
- struct zfcp_adapter *adapter;
-
- list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
- if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
- BUS_ID_SIZE) == 0) &&
- !(atomic_read(&adapter->status) &
- ZFCP_STATUS_COMMON_REMOVE))
- return adapter;
- return NULL;
-}
-
static void __init zfcp_init_device_configure(void)
{
struct zfcp_adapter *adapter;
@@ -141,7 +129,12 @@ static void __init zfcp_init_device_configure(void)
goto out_unit;
up(&zfcp_data.config_sema);
ccw_device_set_online(adapter->ccw_device);
+
zfcp_erp_wait(adapter);
+ wait_event(adapter->erp_done_wqh,
+ !(atomic_read(&unit->status) &
+ ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
+
down(&zfcp_data.config_sema);
zfcp_unit_put(unit);
out_unit:
@@ -180,9 +173,9 @@ static int __init zfcp_module_init(void)
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
- INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
- INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
+ zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
+ INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
sema_init(&zfcp_data.config_sema, 1);
rwlock_init(&zfcp_data.config_lock);
@@ -193,13 +186,14 @@ static int __init zfcp_module_init(void)
retval = misc_register(&zfcp_cfdc_misc);
if (retval) {
- pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
+ pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
goto out_misc;
}
retval = zfcp_ccw_register();
if (retval) {
- pr_err("zfcp: Registration with common I/O layer failed.\n");
+ pr_err("zfcp: The zfcp device driver could not register with "
+ "the common I/O layer\n");
goto out_ccw_register;
}
@@ -231,8 +225,7 @@ module_init(zfcp_module_init);
*
* Returns: pointer to zfcp_unit or NULL
*/
-struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
- fcp_lun_t fcp_lun)
+struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
@@ -251,7 +244,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
* Returns: pointer to zfcp_port or NULL
*/
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
- wwn_t wwpn)
+ u64 wwpn)
{
struct zfcp_port *port;
@@ -276,7 +269,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
*
* Sets up some unit internal structures and creates sysfs entry.
*/
-struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
+struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
@@ -290,7 +283,8 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
- snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun);
+ dev_set_name(&unit->sysfs_device, "0x%016llx",
+ (unsigned long long) fcp_lun);
unit->sysfs_device.parent = &port->sysfs_device;
unit->sysfs_device.release = zfcp_sysfs_unit_release;
dev_set_drvdata(&unit->sysfs_device, unit);
@@ -323,7 +317,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
}
zfcp_unit_get(unit);
- unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
write_lock_irq(&zfcp_data.config_lock);
list_add_tail(&unit->list, &port->unit_list_head);
@@ -332,7 +325,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
write_unlock_irq(&zfcp_data.config_lock);
- port->units++;
zfcp_port_get(port);
return unit;
@@ -351,11 +343,10 @@ err_out_free:
*/
void zfcp_unit_dequeue(struct zfcp_unit *unit)
{
- zfcp_unit_wait(unit);
+ wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
write_lock_irq(&zfcp_data.config_lock);
list_del(&unit->list);
write_unlock_irq(&zfcp_data.config_lock);
- unit->port->units--;
zfcp_port_put(unit->port);
sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
device_unregister(&unit->sysfs_device);
@@ -416,11 +407,6 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.data_gid_pn);
}
-static void zfcp_dummy_release(struct device *dev)
-{
- return;
-}
-
/**
* zfcp_status_read_refill - refill the long running status_read_requests
* @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
@@ -450,19 +436,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
stat_work));
}
-static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
-{
- struct zfcp_port *port;
-
- port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
- ZFCP_DID_DIRECTORY_SERVICE);
- if (IS_ERR(port))
- return PTR_ERR(port);
- zfcp_port_put(port);
-
- return 0;
-}
-
/**
* zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
@@ -508,7 +481,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
init_waitqueue_head(&adapter->erp_done_wqh);
INIT_LIST_HEAD(&adapter->port_list_head);
- INIT_LIST_HEAD(&adapter->port_remove_lh);
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
@@ -518,7 +490,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
spin_lock_init(&adapter->san_dbf_lock);
spin_lock_init(&adapter->scsi_dbf_lock);
spin_lock_init(&adapter->rec_dbf_lock);
- spin_lock_init(&adapter->req_q.lock);
+ spin_lock_init(&adapter->req_q_lock);
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@@ -537,28 +509,15 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto sysfs_failed;
- adapter->generic_services.parent = &adapter->ccw_device->dev;
- adapter->generic_services.release = zfcp_dummy_release;
- snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
- "generic_services");
-
- if (device_register(&adapter->generic_services))
- goto generic_services_failed;
-
write_lock_irq(&zfcp_data.config_lock);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
write_unlock_irq(&zfcp_data.config_lock);
- zfcp_data.adapters++;
-
- zfcp_nameserver_enqueue(adapter);
+ zfcp_fc_nameserver_init(adapter);
return 0;
-generic_services_failed:
- sysfs_remove_group(&ccw_device->dev.kobj,
- &zfcp_sysfs_adapter_attrs);
sysfs_failed:
zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
@@ -585,7 +544,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
zfcp_adapter_scsi_unregister(adapter);
- device_unregister(&adapter->generic_services);
sysfs_remove_group(&adapter->ccw_device->dev.kobj,
&zfcp_sysfs_adapter_attrs);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
@@ -603,9 +561,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
list_del(&adapter->list);
write_unlock_irq(&zfcp_data.config_lock);
- /* decrease number of adapters in list */
- zfcp_data.adapters--;
-
zfcp_qdio_free(adapter);
zfcp_free_low_mem_buffers(adapter);
@@ -633,21 +588,19 @@ static void zfcp_sysfs_port_release(struct device *dev)
* d_id is used to enqueue ports with a well known address like the Directory
* Service for nameserver lookup.
*/
-struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
+struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
u32 status, u32 d_id)
{
struct zfcp_port *port;
int retval;
- char *bus_id;
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&port->remove_wq);
-
INIT_LIST_HEAD(&port->unit_list_head);
- INIT_LIST_HEAD(&port->unit_remove_lh);
+ INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
port->adapter = adapter;
port->d_id = d_id;
@@ -657,34 +610,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set(&port->refcount, 0);
- if (status & ZFCP_STATUS_PORT_WKA) {
- switch (d_id) {
- case ZFCP_DID_DIRECTORY_SERVICE:
- bus_id = "directory";
- break;
- case ZFCP_DID_MANAGEMENT_SERVICE:
- bus_id = "management";
- break;
- case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
- bus_id = "key_distribution";
- break;
- case ZFCP_DID_ALIAS_SERVICE:
- bus_id = "alias";
- break;
- case ZFCP_DID_TIME_SERVICE:
- bus_id = "time";
- break;
- default:
- kfree(port);
- return ERR_PTR(-EINVAL);
- }
- snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
- port->sysfs_device.parent = &adapter->generic_services;
- } else {
- snprintf(port->sysfs_device.bus_id,
- BUS_ID_SIZE, "0x%016llx", wwpn);
- port->sysfs_device.parent = &adapter->ccw_device->dev;
- }
+ dev_set_name(&port->sysfs_device, "0x%016llx", wwpn);
+ port->sysfs_device.parent = &adapter->ccw_device->dev;
port->sysfs_device.release = zfcp_sysfs_port_release;
dev_set_drvdata(&port->sysfs_device, port);
@@ -700,12 +627,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
if (device_register(&port->sysfs_device))
goto err_out_free;
- if (status & ZFCP_STATUS_PORT_WKA)
- retval = sysfs_create_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_ns_port_attrs);
- else
- retval = sysfs_create_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_port_attrs);
+ retval = sysfs_create_group(&port->sysfs_device.kobj,
+ &zfcp_sysfs_port_attrs);
if (retval) {
device_unregister(&port->sysfs_device);
@@ -718,10 +641,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
list_add_tail(&port->list, &adapter->port_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
- if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
- if (!adapter->nameserver_port)
- adapter->nameserver_port = port;
- adapter->ports++;
write_unlock_irq(&zfcp_data.config_lock);
@@ -740,21 +659,15 @@ err_out:
*/
void zfcp_port_dequeue(struct zfcp_port *port)
{
- zfcp_port_wait(port);
+ wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
write_lock_irq(&zfcp_data.config_lock);
list_del(&port->list);
- port->adapter->ports--;
write_unlock_irq(&zfcp_data.config_lock);
if (port->rport)
fc_remote_port_delete(port->rport);
port->rport = NULL;
zfcp_adapter_put(port->adapter);
- if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
- sysfs_remove_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_ns_port_attrs);
- else
- sysfs_remove_group(&port->sysfs_device.kobj,
- &zfcp_sysfs_port_attrs);
+ sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
device_unregister(&port->sysfs_device);
}
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 51b6a05f4d1..b04038c7478 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -25,7 +25,8 @@ static int zfcp_ccw_probe(struct ccw_device *ccw_device)
down(&zfcp_data.config_sema);
if (zfcp_adapter_enqueue(ccw_device)) {
dev_err(&ccw_device->dev,
- "Setup of data structures failed.\n");
+ "Setting up data structures for the "
+ "FCP adapter failed\n");
retval = -EINVAL;
}
up(&zfcp_data.config_sema);
@@ -46,6 +47,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
struct zfcp_adapter *adapter;
struct zfcp_port *port, *p;
struct zfcp_unit *unit, *u;
+ LIST_HEAD(unit_remove_lh);
+ LIST_HEAD(port_remove_lh);
ccw_device_set_offline(ccw_device);
down(&zfcp_data.config_sema);
@@ -54,26 +57,26 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
write_lock_irq(&zfcp_data.config_lock);
list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
- list_move(&unit->list, &port->unit_remove_lh);
+ list_move(&unit->list, &unit_remove_lh);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
&unit->status);
}
- list_move(&port->list, &adapter->port_remove_lh);
+ list_move(&port->list, &port_remove_lh);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
}
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
write_unlock_irq(&zfcp_data.config_lock);
- list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
- list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
- if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
- &unit->status))
+ list_for_each_entry_safe(port, p, &port_remove_lh, list) {
+ list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
+ if (atomic_read(&unit->status) &
+ ZFCP_STATUS_UNIT_REGISTERED)
scsi_remove_device(unit->device);
zfcp_unit_dequeue(unit);
}
zfcp_port_dequeue(port);
}
- zfcp_adapter_wait(adapter);
+ wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
zfcp_adapter_dequeue(adapter);
up(&zfcp_data.config_sema);
@@ -156,15 +159,18 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
switch (event) {
case CIO_GONE:
- dev_warn(&adapter->ccw_device->dev, "device gone\n");
+ dev_warn(&adapter->ccw_device->dev,
+ "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
break;
case CIO_NO_PATH:
- dev_warn(&adapter->ccw_device->dev, "no path\n");
+ dev_warn(&adapter->ccw_device->dev,
+ "The CHPID for the FCP device is offline\n");
zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
break;
case CIO_OPER:
- dev_info(&adapter->ccw_device->dev, "operational again\n");
+ dev_info(&adapter->ccw_device->dev,
+ "The FCP device is operational again\n");
zfcp_erp_modify_adapter_status(adapter, 11, NULL,
ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
@@ -220,3 +226,20 @@ int __init zfcp_ccw_register(void)
{
return ccw_driver_register(&zfcp_ccw_driver);
}
+
+/**
+ * zfcp_get_adapter_by_busid - find zfcp_adapter struct
+ * @busid: bus id string of zfcp adapter to find
+ */
+struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
+{
+ struct ccw_device *ccw_device;
+ struct zfcp_adapter *adapter = NULL;
+
+ ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+ if (ccw_device) {
+ adapter = dev_get_drvdata(&ccw_device->dev);
+ put_device(&ccw_device->dev);
+ }
+ return adapter;
+}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index fca48b88fc5..060f5f2352e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -318,6 +318,26 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
}
+/**
+ * zfcp_hba_dbf_event_berr - trace event for bit error threshold
+ * @adapter: adapter affected by this QDIO related event
+ * @req: fsf request
+ */
+void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
+ struct zfcp_fsf_req *req)
+{
+ struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+ struct fsf_status_read_buffer *sr_buf = req->data;
+ struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+ memset(r, 0, sizeof(*r));
+ strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
+ memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
+ debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
+ spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+}
static void zfcp_hba_dbf_view_response(char **p,
struct zfcp_hba_dbf_record_response *r)
{
@@ -399,6 +419,30 @@ static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
}
+static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
+{
+ zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
+ zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
+ zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
+ zfcp_dbf_out(p, "prim_seq_err", "%d",
+ r->primitive_sequence_error_count);
+ zfcp_dbf_out(p, "inval_trans_word_err", "%d",
+ r->invalid_transmission_word_error_count);
+ zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
+ zfcp_dbf_out(p, "prim_seq_event_to", "%d",
+ r->primitive_sequence_event_timeout_count);
+ zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
+ r->elastic_buffer_overrun_error_count);
+ zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
+ r->advertised_receive_b2b_credit);
+ zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
+ r->current_receive_b2b_credit);
+ zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
+ r->advertised_transmit_b2b_credit);
+ zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
+ r->current_transmit_b2b_credit);
+}
+
static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
@@ -418,6 +462,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
zfcp_hba_dbf_view_status(&p, &r->u.status);
else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
+ else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
+ zfcp_hba_dbf_view_berr(&p, &r->u.berr);
p += sprintf(p, "\n");
return p - out_buf;
@@ -519,14 +565,14 @@ static const char *zfcp_rec_dbf_ids[] = {
[75] = "physical port recovery escalation after failed port "
"recovery",
[76] = "port recovery escalation after failed unit recovery",
- [77] = "recovery opening nameserver port",
+ [77] = "",
[78] = "duplicate request id",
[79] = "link down",
[80] = "exclusive read-only unit access unsupported",
[81] = "shared read-write unit access unsupported",
[82] = "incoming rscn",
[83] = "incoming wwpn",
- [84] = "",
+ [84] = "wka port handle not valid close port",
[85] = "online",
[86] = "offline",
[87] = "ccw device gone",
@@ -570,7 +616,7 @@ static const char *zfcp_rec_dbf_ids[] = {
[125] = "need newer zfcp",
[126] = "need newer microcode",
[127] = "arbitrated loop not supported",
- [128] = "unknown topology",
+ [128] = "",
[129] = "qtcb size mismatch",
[130] = "unknown fsf status ecd",
[131] = "fcp request too big",
@@ -829,9 +875,9 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_port *port = ct->port;
- struct zfcp_adapter *adapter = port->adapter;
- struct ct_hdr *hdr = zfcp_sg_to_address(ct->req);
+ struct zfcp_wka_port *wka_port = ct->wka_port;
+ struct zfcp_adapter *adapter = wka_port->adapter;
+ struct ct_hdr *hdr = sg_virt(ct->req);
struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
unsigned long flags;
@@ -842,7 +888,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_seqno = fsf_req->seq_no;
r->s_id = fc_host_port_id(adapter->scsi_host);
- r->d_id = port->d_id;
+ r->d_id = wka_port->d_id;
oct->cmd_req_code = hdr->cmd_rsp_code;
oct->revision = hdr->revision;
oct->gs_type = hdr->gs_type;
@@ -863,9 +909,9 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
{
struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
- struct zfcp_port *port = ct->port;
- struct zfcp_adapter *adapter = port->adapter;
- struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp);
+ struct zfcp_wka_port *wka_port = ct->wka_port;
+ struct zfcp_adapter *adapter = wka_port->adapter;
+ struct ct_hdr *hdr = sg_virt(ct->resp);
struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
unsigned long flags;
@@ -875,7 +921,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
r->fsf_reqid = (unsigned long)fsf_req;
r->fsf_seqno = fsf_req->seq_no;
- r->s_id = port->d_id;
+ r->s_id = wka_port->d_id;
r->d_id = fc_host_port_id(adapter->scsi_host);
rct->cmd_rsp_code = hdr->cmd_rsp_code;
rct->revision = hdr->revision;
@@ -922,8 +968,8 @@ void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
zfcp_san_dbf_event_els("oels", 2, fsf_req,
fc_host_port_id(els->adapter->scsi_host),
- els->d_id, *(u8 *) zfcp_sg_to_address(els->req),
- zfcp_sg_to_address(els->req), els->req->length);
+ els->d_id, *(u8 *) sg_virt(els->req),
+ sg_virt(els->req), els->req->length);
}
/**
@@ -936,8 +982,7 @@ void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
fc_host_port_id(els->adapter->scsi_host),
- *(u8 *)zfcp_sg_to_address(els->req),
- zfcp_sg_to_address(els->resp),
+ *(u8 *)sg_virt(els->req), sg_virt(els->resp),
els->resp->length);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0ddb18449d1..e8f450801fe 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -151,6 +151,7 @@ struct zfcp_hba_dbf_record {
struct zfcp_hba_dbf_record_response response;
struct zfcp_hba_dbf_record_status status;
struct zfcp_hba_dbf_record_qdio qdio;
+ struct fsf_bit_error_payload berr;
} u;
} __attribute__ ((packed));
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 67f45fc62f5..8a13071c444 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -39,29 +39,6 @@
/********************* GENERAL DEFINES *********************************/
-/**
- * zfcp_sg_to_address - determine kernel address from struct scatterlist
- * @list: struct scatterlist
- * Return: kernel address
- */
-static inline void *
-zfcp_sg_to_address(struct scatterlist *list)
-{
- return sg_virt(list);
-}
-
-/**
- * zfcp_address_to_sg - set up struct scatterlist from kernel address
- * @address: kernel address
- * @list: struct scatterlist
- * @size: buffer size
- */
-static inline void
-zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
-{
- sg_set_buf(list, address, size);
-}
-
#define REQUEST_LIST_SIZE 128
/********************* SCSI SPECIFIC DEFINES *********************************/
@@ -101,11 +78,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
-typedef unsigned long long wwn_t;
-typedef unsigned long long fcp_lun_t;
-/* data length field may be at variable position in FCP-2 FCP_CMND IU */
-typedef unsigned int fcp_dl_t;
-
/* timeout for name-server lookup (in seconds) */
#define ZFCP_NS_GID_PN_TIMEOUT 10
@@ -129,7 +101,7 @@ typedef unsigned int fcp_dl_t;
/* FCP(-2) FCP_CMND IU */
struct fcp_cmnd_iu {
- fcp_lun_t fcp_lun; /* FCP logical unit number */
+ u64 fcp_lun; /* FCP logical unit number */
u8 crn; /* command reference number */
u8 reserved0:5; /* reserved */
u8 task_attribute:3; /* task attribute */
@@ -204,7 +176,7 @@ struct fcp_rscn_element {
struct fcp_logo {
u32 command;
u32 nport_did;
- wwn_t nport_wwpn;
+ u64 nport_wwpn;
} __attribute__((packed));
/*
@@ -218,13 +190,6 @@ struct fcp_logo {
#define ZFCP_LS_RSCN 0x61
#define ZFCP_LS_RNID 0x78
-struct zfcp_ls_rjt_par {
- u8 action;
- u8 reason_code;
- u8 reason_expl;
- u8 vendor_unique;
-} __attribute__ ((packed));
-
struct zfcp_ls_adisc {
u8 code;
u8 field[3];
@@ -234,20 +199,6 @@ struct zfcp_ls_adisc {
u32 nport_id;
} __attribute__ ((packed));
-struct zfcp_ls_adisc_acc {
- u8 code;
- u8 field[3];
- u32 hard_nport_id;
- u64 wwpn;
- u64 wwnn;
- u32 nport_id;
-} __attribute__ ((packed));
-
-struct zfcp_rc_entry {
- u8 code;
- const char *description;
-};
-
/*
* FC-GS-2 stuff
*/
@@ -281,9 +232,7 @@ struct zfcp_rc_entry {
#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
-#define ZFCP_STATUS_COMMON_OPENING 0x08000000
#define ZFCP_STATUS_COMMON_OPEN 0x04000000
-#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
@@ -291,16 +240,15 @@ struct zfcp_rc_entry {
/* adapter status */
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
-#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
-#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
/* FC-PH/FC-GS well-known address identifiers for generic services */
+#define ZFCP_DID_WKA 0xFFFFF0
#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
@@ -312,29 +260,27 @@ struct zfcp_rc_entry {
#define ZFCP_STATUS_PORT_DID_DID 0x00000002
#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
-#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
-/* for ports with well known addresses */
-#define ZFCP_STATUS_PORT_WKA \
- (ZFCP_STATUS_PORT_NO_WWPN | \
- ZFCP_STATUS_PORT_NO_SCSI_ID)
+/* well known address (WKA) port status */
+enum zfcp_wka_status {
+ ZFCP_WKA_PORT_OFFLINE,
+ ZFCP_WKA_PORT_CLOSING,
+ ZFCP_WKA_PORT_OPENING,
+ ZFCP_WKA_PORT_ONLINE,
+};
/* logical unit status */
-#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
#define ZFCP_STATUS_UNIT_SHARED 0x00000004
#define ZFCP_STATUS_UNIT_READONLY 0x00000008
#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
-#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
-#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
@@ -379,7 +325,7 @@ struct ct_hdr {
* a port name is required */
struct ct_iu_gid_pn_req {
struct ct_hdr header;
- wwn_t wwpn;
+ u64 wwpn;
} __attribute__ ((packed));
/* FS_ACC IU and data unit for GID_PN nameserver request */
@@ -388,11 +334,9 @@ struct ct_iu_gid_pn_resp {
u32 d_id;
} __attribute__ ((packed));
-typedef void (*zfcp_send_ct_handler_t)(unsigned long);
-
/**
* struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
- * @port: port where the request is sent to
+ * @wka_port: port where the request is sent to
* @req: scatter-gather list for request
* @resp: scatter-gather list for response
* @req_count: number of elements in request scatter-gather list
@@ -404,12 +348,12 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
* @status: used to pass error status to calling function
*/
struct zfcp_send_ct {
- struct zfcp_port *port;
+ struct zfcp_wka_port *wka_port;
struct scatterlist *req;
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
- zfcp_send_ct_handler_t handler;
+ void (*handler)(unsigned long);
unsigned long handler_data;
int timeout;
struct completion *completion;
@@ -426,8 +370,6 @@ struct zfcp_gid_pn_data {
struct zfcp_port *port;
};
-typedef void (*zfcp_send_els_handler_t)(unsigned long);
-
/**
* struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
* @adapter: adapter where request is sent from
@@ -451,22 +393,28 @@ struct zfcp_send_els {
struct scatterlist *resp;
unsigned int req_count;
unsigned int resp_count;
- zfcp_send_els_handler_t handler;
+ void (*handler)(unsigned long);
unsigned long handler_data;
struct completion *completion;
int ls_code;
int status;
};
+struct zfcp_wka_port {
+ struct zfcp_adapter *adapter;
+ wait_queue_head_t completion_wq;
+ enum zfcp_wka_status status;
+ atomic_t refcount;
+ u32 d_id;
+ u32 handle;
+ struct mutex mutex;
+ struct delayed_work work;
+};
+
struct zfcp_qdio_queue {
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
- u8 first; /* index of next free bfr
- in queue (free_count>0) */
- atomic_t count; /* number of free buffers
- in queue */
- spinlock_t lock; /* lock for operations on queue */
- int pci_batch; /* SBALs since PCI indication
- was last set */
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ u8 first; /* index of next free bfr in queue */
+ atomic_t count; /* number of free buffers in queue */
};
struct zfcp_erp_action {
@@ -475,7 +423,7 @@ struct zfcp_erp_action {
struct zfcp_adapter *adapter; /* device which should be recovered */
struct zfcp_port *port;
struct zfcp_unit *unit;
- volatile u32 status; /* recovery status */
+ u32 status; /* recovery status */
u32 step; /* active step of this erp action */
struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
for this action */
@@ -506,8 +454,8 @@ struct zfcp_adapter {
atomic_t refcount; /* reference count */
wait_queue_head_t remove_wq; /* can be used to wait for
refcount drop to zero */
- wwn_t peer_wwnn; /* P2P peer WWNN */
- wwn_t peer_wwpn; /* P2P peer WWPN */
+ u64 peer_wwnn; /* P2P peer WWNN */
+ u64 peer_wwpn; /* P2P peer WWPN */
u32 peer_d_id; /* P2P peer D_ID */
struct ccw_device *ccw_device; /* S/390 ccw device */
u32 hydra_version; /* Hydra version */
@@ -518,13 +466,13 @@ struct zfcp_adapter {
u16 timer_ticks; /* time int for a tick */
struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
struct list_head port_list_head; /* remote port list */
- struct list_head port_remove_lh; /* head of ports to be
- removed */
- u32 ports; /* number of remote ports */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue req_q; /* request queue */
+ spinlock_t req_q_lock; /* for operations on queue */
+ int req_q_pci_batch; /* SBALs since PCI indication
+ was last set */
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
						more available SBALs */
@@ -548,7 +496,7 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
- struct zfcp_port *nameserver_port; /* adapter's nameserver */
+ struct zfcp_wka_port nsp; /* adapter's nameserver */
debug_info_t *rec_dbf;
debug_info_t *hba_dbf;
debug_info_t *san_dbf; /* debug feature areas */
@@ -563,11 +511,11 @@ struct zfcp_adapter {
struct zfcp_scsi_dbf_record scsi_dbf_buf;
struct zfcp_adapter_mempool pool; /* Adapter memory pools */
struct qdio_initialize qdio_init_data; /* for qdio_establish */
- struct device generic_services; /* directory for WKA ports */
struct fc_host_statistics *fc_stats;
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ atomic_t qdio_outb_full; /* queue full incidents */
};
struct zfcp_port {
@@ -579,18 +527,16 @@ struct zfcp_port {
refcount drop to zero */
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list_head; /* head of logical unit list */
- struct list_head unit_remove_lh; /* head of luns to be removed
- list */
- u32 units; /* # of logical units in list */
atomic_t status; /* status of this remote port */
- wwn_t wwnn; /* WWNN if known */
- wwn_t wwpn; /* WWPN */
+ u64 wwnn; /* WWNN if known */
+ u64 wwpn; /* WWPN */
u32 d_id; /* D_ID */
u32 handle; /* handle assigned by FSF */
struct zfcp_erp_action erp_action; /* pending error recovery */
atomic_t erp_counter;
u32 maxframe_size;
u32 supported_classes;
+ struct work_struct gid_pn_work;
};
struct zfcp_unit {
@@ -601,8 +547,7 @@ struct zfcp_unit {
refcount drop to zero */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
- unsigned int scsi_lun; /* own SCSI LUN */
- fcp_lun_t fcp_lun; /* own FCP_LUN */
+ u64 fcp_lun; /* own FCP_LUN */
u32 handle; /* handle assigned by FSF */
struct scsi_device *device; /* scsi device struct pointer */
struct zfcp_erp_action erp_action; /* pending error recovery */
@@ -625,7 +570,7 @@ struct zfcp_fsf_req {
u8 sbal_response; /* SBAL used in interrupt */
wait_queue_head_t completion_wq; /* can be used by a routine
to wait for completion */
- volatile u32 status; /* status of this request */
+ u32 status; /* status of this request */
u32 fsf_command; /* FSF Command copy */
struct fsf_qtcb *qtcb; /* address of associated QTCB */
u32 seq_no; /* Sequence number of request */
@@ -644,23 +589,20 @@ struct zfcp_fsf_req {
struct zfcp_data {
struct scsi_host_template scsi_host_template;
struct scsi_transport_template *scsi_transport_template;
- atomic_t status; /* Module status flags */
struct list_head adapter_list_head; /* head of adapter list */
- struct list_head adapter_remove_lh; /* head of adapters to be
- removed */
- u32 adapters; /* # of adapters in list */
rwlock_t config_lock; /* serialises changes
to adapter/port/unit
lists */
struct semaphore config_sema; /* serialises configuration
changes */
atomic_t loglevel; /* current loglevel */
- char init_busid[BUS_ID_SIZE];
- wwn_t init_wwpn;
- fcp_lun_t init_fcp_lun;
- struct kmem_cache *fsf_req_qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
+ char init_busid[20];
+ u64 init_wwpn;
+ u64 init_fcp_lun;
+ struct kmem_cache *fsf_req_qtcb_cache;
+ struct kmem_cache *sr_buffer_cache;
+ struct kmem_cache *gid_pn_cache;
+ struct workqueue_struct *work_queue;
};
/* struct used by memory pools for fsf_requests */
@@ -677,14 +619,7 @@ struct zfcp_fsf_req_qtcb {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
-#ifndef atomic_test_mask
-#define atomic_test_mask(mask, target) \
- ((atomic_read(target) & mask) == mask)
-#endif
-
-#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
-#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
-#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
+#define zfcp_get_busid_by_adapter(adapter) (dev_name(&adapter->ccw_device->dev))
/*
* Helper functions for request ID management.
@@ -745,12 +680,6 @@ zfcp_unit_put(struct zfcp_unit *unit)
}
static inline void
-zfcp_unit_wait(struct zfcp_unit *unit)
-{
- wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
-}
-
-static inline void
zfcp_port_get(struct zfcp_port *port)
{
atomic_inc(&port->refcount);
@@ -764,12 +693,6 @@ zfcp_port_put(struct zfcp_port *port)
}
static inline void
-zfcp_port_wait(struct zfcp_port *port)
-{
- wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
-}
-
-static inline void
zfcp_adapter_get(struct zfcp_adapter *adapter)
{
atomic_inc(&adapter->refcount);
@@ -782,10 +705,4 @@ zfcp_adapter_put(struct zfcp_adapter *adapter)
wake_up(&adapter->remove_wq);
}
-static inline void
-zfcp_adapter_wait(struct zfcp_adapter *adapter)
-{
- wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
-}
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 643ac4bba5b..9040f738ff3 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -23,7 +23,6 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
- ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
@@ -532,8 +531,7 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
struct zfcp_port *port;
list_for_each_entry(port, &adapter->port_list_head, list)
- if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA))
- _zfcp_erp_port_reopen(port, clear, id, ref);
+ _zfcp_erp_port_reopen(port, clear, id, ref);
}
static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
@@ -669,8 +667,6 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
int ret;
struct zfcp_adapter *adapter = act->adapter;
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
-
write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(act);
write_unlock_irq(&adapter->erp_lock);
@@ -741,8 +737,7 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
failed_qdio:
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
- ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
- ZFCP_STATUS_ADAPTER_XPORT_OK,
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&act->adapter->status);
return retval;
}
@@ -751,15 +746,11 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
{
int retval;
- atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
zfcp_erp_adapter_strategy_generic(act, 1); /* close */
- atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
if (retval == ZFCP_ERP_FAILED)
ssleep(8);
@@ -783,10 +774,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
{
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
- ZFCP_STATUS_COMMON_CLOSING |
- ZFCP_STATUS_COMMON_ACCESS_DENIED |
- ZFCP_STATUS_PORT_DID_DID |
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_PORT_PHYS_CLOSING |
ZFCP_STATUS_PORT_INVALID_WWPN,
&port->status);
@@ -839,73 +827,12 @@ static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_CONTINUES;
}
-static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
-{
- unsigned long flags;
- struct zfcp_adapter *adapter = ns_act->adapter;
- struct zfcp_erp_action *act, *tmp;
- int status;
-
- read_lock_irqsave(&adapter->erp_lock, flags);
- list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
- if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
- status = atomic_read(&adapter->nameserver_port->status);
- if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
- zfcp_erp_port_failed(act->port, 27, NULL);
- zfcp_erp_action_ready(act);
- }
- }
- read_unlock_irqrestore(&adapter->erp_lock, flags);
-}
-
-static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
-{
- int retval;
-
- switch (act->step) {
- case ZFCP_ERP_STEP_UNINITIALIZED:
- case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
- case ZFCP_ERP_STEP_PORT_CLOSING:
- return zfcp_erp_port_strategy_open_port(act);
-
- case ZFCP_ERP_STEP_PORT_OPENING:
- if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
- retval = ZFCP_ERP_SUCCEEDED;
- else
- retval = ZFCP_ERP_FAILED;
- /* this is needed anyway */
- zfcp_erp_port_strategy_open_ns_wake(act);
- return retval;
-
- default:
- return ZFCP_ERP_FAILED;
- }
-}
-
-static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
-{
- int retval;
-
- retval = zfcp_fc_ns_gid_pn_request(act);
- if (retval == -ENOMEM)
- return ZFCP_ERP_NOMEM;
- act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
- if (retval)
- return ZFCP_ERP_FAILED;
- return ZFCP_ERP_CONTINUES;
-}
-
static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
if (port->wwpn != adapter->peer_wwpn) {
- dev_err(&adapter->ccw_device->dev,
- "Failed to open port 0x%016Lx, "
- "Peer WWPN 0x%016Lx does not "
- "match.\n", port->wwpn,
- adapter->peer_wwpn);
zfcp_erp_port_failed(port, 25, NULL);
return ZFCP_ERP_FAILED;
}
@@ -914,11 +841,25 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
return zfcp_erp_port_strategy_open_port(act);
}
+void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
+{
+ int retval;
+ struct zfcp_port *port = container_of(work, struct zfcp_port,
+ gid_pn_work);
+
+ retval = zfcp_fc_ns_gid_pn(&port->erp_action);
+ if (retval == -ENOMEM)
+ zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
+ port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
+ if (retval)
+ zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
+
+}
+
static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
struct zfcp_port *port = act->port;
- struct zfcp_port *ns_port = adapter->nameserver_port;
int p_status = atomic_read(&port->status);
switch (act->step) {
@@ -927,28 +868,10 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_CLOSING:
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
return zfcp_erp_open_ptp_port(act);
- if (!ns_port) {
- dev_err(&adapter->ccw_device->dev,
- "Nameserver port unavailable.\n");
- return ZFCP_ERP_FAILED;
- }
- if (!(atomic_read(&ns_port->status) &
- ZFCP_STATUS_COMMON_UNBLOCKED)) {
- /* nameserver port may live again */
- atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
- &ns_port->status);
- if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
- act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
- return ZFCP_ERP_CONTINUES;
- }
- return ZFCP_ERP_FAILED;
+ if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
+ queue_work(zfcp_data.work_queue, &port->gid_pn_work);
+ return ZFCP_ERP_CONTINUES;
}
- /* else nameserver port is already open, fall through */
- case ZFCP_ERP_STEP_NAMESERVER_OPEN:
- if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
- return ZFCP_ERP_FAILED;
- return zfcp_erp_port_strategy_open_lookup(act);
-
case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
@@ -961,25 +884,26 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
case ZFCP_ERP_STEP_PORT_OPENING:
/* D_ID might have changed during open */
- if ((p_status & ZFCP_STATUS_COMMON_OPEN) &&
- (p_status & ZFCP_STATUS_PORT_DID_DID))
- return ZFCP_ERP_SUCCEEDED;
+ if (p_status & ZFCP_STATUS_COMMON_OPEN) {
+ if (p_status & ZFCP_STATUS_PORT_DID_DID)
+ return ZFCP_ERP_SUCCEEDED;
+ else {
+ act->step = ZFCP_ERP_STEP_PORT_CLOSING;
+ return ZFCP_ERP_CONTINUES;
+ }
/* fall through otherwise */
+ }
}
return ZFCP_ERP_FAILED;
}
-static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
-{
- if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
- return zfcp_erp_port_strategy_open_nameserver(act);
- return zfcp_erp_port_strategy_open_common(act);
-}
-
static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
{
struct zfcp_port *port = erp_action->port;
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
+ goto close_init_done;
+
switch (erp_action->step) {
case ZFCP_ERP_STEP_UNINITIALIZED:
zfcp_erp_port_strategy_clearstati(port);
@@ -992,19 +916,17 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
return ZFCP_ERP_FAILED;
break;
}
+
+close_init_done:
if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
return ZFCP_ERP_EXIT;
- else
- return zfcp_erp_port_strategy_open(erp_action);
- return ZFCP_ERP_FAILED;
+ return zfcp_erp_port_strategy_open_common(erp_action);
}
static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
{
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING |
- ZFCP_STATUS_COMMON_CLOSING |
- ZFCP_STATUS_COMMON_ACCESS_DENIED |
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_UNIT_SHARED |
ZFCP_STATUS_UNIT_READONLY,
&unit->status);
@@ -1065,8 +987,14 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
break;
case ZFCP_ERP_FAILED :
atomic_inc(&unit->erp_counter);
- if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS)
+ if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
+ dev_err(&unit->port->adapter->ccw_device->dev,
+ "ERP failed for unit 0x%016Lx on "
+ "port 0x%016Lx\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 21, NULL);
+ }
break;
}
@@ -1091,8 +1019,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
result = ZFCP_ERP_EXIT;
}
atomic_inc(&port->erp_counter);
- if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS)
+ if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
+ dev_err(&port->adapter->ccw_device->dev,
+ "ERP failed for remote port 0x%016Lx\n",
+ (unsigned long long)port->wwpn);
zfcp_erp_port_failed(port, 22, NULL);
+ }
break;
}
@@ -1114,8 +1046,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
case ZFCP_ERP_FAILED :
atomic_inc(&adapter->erp_counter);
- if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS)
+ if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
+ dev_err(&adapter->ccw_device->dev,
+ "ERP cannot recover an error "
+ "on the FCP device\n");
zfcp_erp_adapter_failed(adapter, 23, NULL);
+ }
break;
}
@@ -1250,9 +1186,10 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
struct zfcp_unit *unit = p->unit;
struct fc_rport *rport = unit->port->rport;
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
- unit->scsi_lun, 0);
+ scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
zfcp_unit_put(unit);
+ wake_up(&unit->port->adapter->erp_done_wqh);
kfree(p);
}
@@ -1263,9 +1200,9 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
dev_err(&unit->port->adapter->ccw_device->dev,
- "Out of resources. Could not register unit "
- "0x%016Lx on port 0x%016Lx with SCSI stack.\n",
- unit->fcp_lun, unit->port->wwpn);
+ "Registering unit 0x%016Lx on port 0x%016Lx failed\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
return;
}
@@ -1273,7 +1210,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
- schedule_work(&p->work);
+ queue_work(zfcp_data.work_queue, &p->work);
}
static void zfcp_erp_rport_register(struct zfcp_port *port)
@@ -1286,8 +1223,8 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!port->rport) {
dev_err(&port->adapter->ccw_device->dev,
- "Failed registration of rport "
- "0x%016Lx.\n", port->wwpn);
+ "Registering port 0x%016Lx failed\n",
+ (unsigned long long)port->wwpn);
return;
}
@@ -1299,12 +1236,12 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
{
struct zfcp_port *port;
- list_for_each_entry(port, &adapter->port_list_head, list)
- if (port->rport && !(atomic_read(&port->status) &
- ZFCP_STATUS_PORT_WKA)) {
- fc_remote_port_delete(port->rport);
- port->rport = NULL;
- }
+ list_for_each_entry(port, &adapter->port_list_head, list) {
+ if (!port->rport)
+ continue;
+ fc_remote_port_delete(port->rport);
+ port->rport = NULL;
+ }
}
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1439,7 +1376,7 @@ static int zfcp_erp_thread(void *data)
struct zfcp_erp_action *act;
unsigned long flags;
- daemonize("zfcperp%s", adapter->ccw_device->dev.bus_id);
+ daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
/* Block all signals */
siginitsetinv(&current->blocked, 0);
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1459,9 +1396,9 @@ static int zfcp_erp_thread(void *data)
zfcp_erp_wakeup(adapter);
}
- zfcp_rec_dbf_event_thread(4, adapter);
+ zfcp_rec_dbf_event_thread_lock(4, adapter);
down_interruptible(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread(5, adapter);
+ zfcp_rec_dbf_event_thread_lock(5, adapter);
}
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1484,7 +1421,7 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
if (retval < 0) {
dev_err(&adapter->ccw_device->dev,
- "Creation of ERP thread failed.\n");
+ "Creating an ERP thread for the FCP device failed.\n");
return retval;
}
wait_event(adapter->erp_thread_wqh,
@@ -1506,7 +1443,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
{
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
up(&adapter->erp_ready_sem);
- zfcp_rec_dbf_event_thread_lock(2, adapter);
+ zfcp_rec_dbf_event_thread_lock(3, adapter);
wait_event(adapter->erp_thread_wqh,
!(atomic_read(&adapter->status) &
@@ -1526,7 +1463,6 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
{
zfcp_erp_modify_adapter_status(adapter, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
- dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
}
/**
@@ -1539,15 +1475,6 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
{
zfcp_erp_modify_port_status(port, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-
- if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
- dev_err(&port->adapter->ccw_device->dev,
- "Port ERP failed for WKA port d_id=0x%06x.\n",
- port->d_id);
- else
- dev_err(&port->adapter->ccw_device->dev,
- "Port ERP failed for port wwpn=0x%016Lx.\n",
- port->wwpn);
}
/**
@@ -1560,10 +1487,6 @@ void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
{
zfcp_erp_modify_unit_status(unit, id, ref,
ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-
- dev_err(&unit->port->adapter->ccw_device->dev,
- "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
- unit->fcp_lun, unit->port->wwpn);
}
/**
@@ -1754,9 +1677,8 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
- if (!(status & ZFCP_STATUS_PORT_WKA))
- list_for_each_entry(unit, &port->unit_list_head, list)
- zfcp_erp_unit_access_changed(unit, id, ref);
+ list_for_each_entry(unit, &port->unit_list_head, list)
+ zfcp_erp_unit_access_changed(unit, id, ref);
return;
}
@@ -1779,10 +1701,7 @@ void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
return;
read_lock_irqsave(&zfcp_data.config_lock, flags);
- if (adapter->nameserver_port)
- zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
list_for_each_entry(port, &adapter->port_list_head, list)
- if (port != adapter->nameserver_port)
- zfcp_erp_port_access_changed(port, id, ref);
+ zfcp_erp_port_access_changed(port, id, ref);
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index edfdb21591f..b5adeda93e1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -12,16 +12,14 @@
#include "zfcp_def.h"
/* zfcp_aux.c */
-extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *,
- fcp_lun_t);
-extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
- wwn_t);
+extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
+extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
extern int zfcp_adapter_enqueue(struct ccw_device *);
extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
-extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32,
+extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern void zfcp_port_dequeue(struct zfcp_port *);
-extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t);
+extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
extern void zfcp_unit_dequeue(struct zfcp_unit *);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
@@ -29,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
/* zfcp_ccw.c */
extern int zfcp_ccw_register(void);
+extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
/* zfcp_cfdc.c */
extern struct miscdevice zfcp_cfdc_misc;
@@ -50,6 +49,8 @@ extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
int);
+extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
+ struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
@@ -91,17 +92,21 @@ extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
extern void zfcp_erp_timeout_handler(unsigned long);
+extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
/* zfcp_fc.c */
extern int zfcp_scan_ports(struct zfcp_adapter *);
extern void _zfcp_scan_ports_later(struct work_struct *);
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
-extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *);
+extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
+extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
+extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -135,10 +140,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
extern int zfcp_qdio_allocate(struct zfcp_adapter *);
extern void zfcp_qdio_free(struct zfcp_adapter *);
extern int zfcp_qdio_send(struct zfcp_fsf_req *);
-extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req(
- struct zfcp_fsf_req *);
-extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
- struct zfcp_fsf_req *);
+extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
+extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_adapter *);
@@ -148,14 +151,12 @@ extern void zfcp_qdio_close(struct zfcp_adapter *);
extern struct zfcp_data zfcp_data;
extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
-extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
extern struct fc_function_template zfcp_transport_functions;
/* zfcp_sysfs.c */
extern struct attribute_group zfcp_sysfs_unit_attrs;
extern struct attribute_group zfcp_sysfs_adapter_attrs;
-extern struct attribute_group zfcp_sysfs_ns_port_attrs;
extern struct attribute_group zfcp_sysfs_port_attrs;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 56196c98c07..1a7c80a77ff 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -39,6 +39,84 @@ struct zfcp_gpn_ft {
struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
};
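+/* ties an optional evaluation handler to a completion for nameserver requests */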
+struct zfcp_fc_ns_handler_data {
+ struct completion done;
+ void (*handler)(unsigned long);
+ unsigned long handler_data;
+};
+
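+/* take a reference to the well-known address port, opening it on first use */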
+static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
+{
+ if (mutex_lock_interruptible(&wka_port->mutex))
+ return -ERESTARTSYS;
+
+ if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
+ wka_port->status = ZFCP_WKA_PORT_OPENING;
+ if (zfcp_fsf_open_wka_port(wka_port))
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ }
+
+ mutex_unlock(&wka_port->mutex);
+
+ wait_event_timeout(
+ wka_port->completion_wq,
+ wka_port->status == ZFCP_WKA_PORT_ONLINE ||
+ wka_port->status == ZFCP_WKA_PORT_OFFLINE,
+ HZ >> 1);
+
+ if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
+ atomic_inc(&wka_port->refcount);
+ return 0;
+ }
+ return -EIO;
+}
+
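+/* delayed work: close the WKA port once the last reference has been dropped */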
+static void zfcp_wka_port_offline(struct work_struct *work)
+{
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct zfcp_wka_port *wka_port =
+ container_of(dw, struct zfcp_wka_port, work);
+
+ wait_event(wka_port->completion_wq,
+ atomic_read(&wka_port->refcount) == 0);
+
+ mutex_lock(&wka_port->mutex);
+ if ((atomic_read(&wka_port->refcount) != 0) ||
+ (wka_port->status != ZFCP_WKA_PORT_ONLINE))
+ goto out;
+
+ wka_port->status = ZFCP_WKA_PORT_CLOSING;
+ if (zfcp_fsf_close_wka_port(wka_port)) {
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wake_up(&wka_port->completion_wq);
+ }
+out:
+ mutex_unlock(&wka_port->mutex);
+}
+
+static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
+{
+ if (atomic_dec_return(&wka_port->refcount) != 0)
+ return;
+	/* wait 10 milliseconds, other reqs might pop in */
+ schedule_delayed_work(&wka_port->work, HZ / 100);
+}
+
+void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
+{
+ struct zfcp_wka_port *wka_port = &adapter->nsp;
+
+ init_waitqueue_head(&wka_port->completion_wq);
+
+ wka_port->adapter = adapter;
+ wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
+
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ atomic_set(&wka_port->refcount, 0);
+ mutex_init(&wka_port->mutex);
+ INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
+}
+
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
@@ -47,10 +125,8 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
read_lock_irqsave(&zfcp_data.config_lock, flags);
list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
- if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
- continue;
/* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
- if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status))
+ if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
/* Try to connect to unused ports anyway. */
zfcp_erp_port_reopen(port,
ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -102,7 +178,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
schedule_work(&fsf_req->adapter->scan_work);
}
-static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn)
+static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_port *port;
@@ -157,7 +233,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_ns_gid_pn_handler(unsigned long data)
+static void zfcp_fc_ns_handler(unsigned long data)
+{
+ struct zfcp_fc_ns_handler_data *compl_rec =
+ (struct zfcp_fc_ns_handler_data *) data;
+
+ if (compl_rec->handler)
+ compl_rec->handler(compl_rec->handler_data);
+
+ complete(&compl_rec->done);
+}
+
+static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
{
struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
struct zfcp_send_ct *ct = &gid_pn->ct;
@@ -166,43 +253,31 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
struct zfcp_port *port = gid_pn->port;
if (ct->status)
- goto out;
+ return;
if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
- goto out;
+ return;
}
/* paranoia */
if (ct_iu_req->wwpn != port->wwpn)
- goto out;
+ return;
/* looks like a valid d_id */
port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
-out:
- mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
}
-/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
- * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
- * return: -ENOMEM on error, 0 otherwise
- */
-int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
+static int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
+ struct zfcp_gid_pn_data *gid_pn)
{
- int ret;
- struct zfcp_gid_pn_data *gid_pn;
struct zfcp_adapter *adapter = erp_action->adapter;
-
- gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
- if (!gid_pn)
- return -ENOMEM;
-
- memset(gid_pn, 0, sizeof(*gid_pn));
+ struct zfcp_fc_ns_handler_data compl_rec;
+ int ret;
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
- gid_pn->ct.port = adapter->nameserver_port;
- gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
- gid_pn->ct.handler_data = (unsigned long) gid_pn;
+ gid_pn->ct.wka_port = &adapter->nsp;
+ gid_pn->ct.handler = zfcp_fc_ns_handler;
+ gid_pn->ct.handler_data = (unsigned long) &compl_rec;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
gid_pn->ct.req = &gid_pn->req;
gid_pn->ct.resp = &gid_pn->resp;
@@ -222,10 +297,42 @@ int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
+ init_completion(&compl_rec.done);
+ compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
+ compl_rec.handler_data = (unsigned long) gid_pn;
ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
erp_action);
+ if (!ret)
+ wait_for_completion(&compl_rec.done);
+ return ret;
+}
+
+/**
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
+ * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
+{
+ int ret;
+ struct zfcp_gid_pn_data *gid_pn;
+ struct zfcp_adapter *adapter = erp_action->adapter;
+
+ gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
+ if (!gid_pn)
+ return -ENOMEM;
+
+ memset(gid_pn, 0, sizeof(*gid_pn));
+
+ ret = zfcp_wka_port_get(&adapter->nsp);
if (ret)
- mempool_free(gid_pn, adapter->pool.data_gid_pn);
+ goto out;
+
+ ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
+
+ zfcp_wka_port_put(&adapter->nsp);
+out:
+ mempool_free(gid_pn, adapter->pool.data_gid_pn);
return ret;
}
@@ -255,14 +362,14 @@ struct zfcp_els_adisc {
struct scatterlist req;
struct scatterlist resp;
struct zfcp_ls_adisc ls_adisc;
- struct zfcp_ls_adisc_acc ls_adisc_acc;
+ struct zfcp_ls_adisc ls_adisc_acc;
};
static void zfcp_fc_adisc_handler(unsigned long data)
{
struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
struct zfcp_port *port = adisc->els.port;
- struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc;
+ struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
if (adisc->els.status) {
/* request rejected or timed out */
@@ -295,7 +402,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
sg_init_one(adisc->els.req, &adisc->ls_adisc,
sizeof(struct zfcp_ls_adisc));
sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
- sizeof(struct zfcp_ls_adisc_acc));
+ sizeof(struct zfcp_ls_adisc));
adisc->els.req_count = 1;
adisc->els.resp_count = 1;
@@ -338,30 +445,6 @@ void zfcp_test_link(struct zfcp_port *port)
zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
}
-static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
-{
- int ret;
-
- if (!adapter->nameserver_port)
- return -EINTR;
-
- if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
- &adapter->nameserver_port->status)) {
- ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
- NULL);
- if (ret)
- return ret;
- zfcp_erp_wait(adapter);
- }
- return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
- &adapter->nameserver_port->status);
-}
-
-static void zfcp_gpn_ft_handler(unsigned long _done)
-{
- complete((struct completion *)_done);
-}
-
static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
{
struct scatterlist *sg = &gpn_ft->sg_req;
@@ -403,7 +486,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
{
struct zfcp_send_ct *ct = &gpn_ft->ct;
struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
- struct completion done;
+ struct zfcp_fc_ns_handler_data compl_rec;
int ret;
/* prepare CT IU for GPN_FT */
@@ -420,19 +503,20 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
req->fc4_type = ZFCP_CT_SCSI_FCP;
/* prepare zfcp_send_ct */
- ct->port = adapter->nameserver_port;
- ct->handler = zfcp_gpn_ft_handler;
- ct->handler_data = (unsigned long)&done;
+ ct->wka_port = &adapter->nsp;
+ ct->handler = zfcp_fc_ns_handler;
+ ct->handler_data = (unsigned long)&compl_rec;
ct->timeout = 10;
ct->req = &gpn_ft->sg_req;
ct->resp = gpn_ft->sg_resp;
ct->req_count = 1;
ct->resp_count = ZFCP_GPN_FT_BUFFERS;
- init_completion(&done);
+ init_completion(&compl_rec.done);
+ compl_rec.handler = NULL;
ret = zfcp_fsf_send_ct(ct, NULL, NULL);
if (!ret)
- wait_for_completion(&done);
+ wait_for_completion(&compl_rec.done);
return ret;
}
@@ -442,9 +526,8 @@ static void zfcp_validate_port(struct zfcp_port *port)
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
- if (port == adapter->nameserver_port)
- return;
- if ((port->supported_classes != 0) || (port->units != 0)) {
+ if ((port->supported_classes != 0) ||
+ !list_empty(&port->unit_list_head)) {
zfcp_port_put(port);
return;
}
@@ -460,7 +543,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
struct scatterlist *sg = gpn_ft->sg_resp;
struct ct_hdr *hdr = sg_virt(sg);
struct gpn_ft_resp_acc *acc = sg_virt(sg);
- struct zfcp_adapter *adapter = ct->port->adapter;
+ struct zfcp_adapter *adapter = ct->wka_port->adapter;
struct zfcp_port *port, *tmp;
u32 d_id;
int ret = 0, x, last = 0;
@@ -490,6 +573,9 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
acc->port_id[2];
+	/* don't attach ports with a well-known address */
+ if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
+ continue;
/* skip the adapter's port and known remote ports */
if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
continue;
@@ -528,13 +614,15 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
return 0;
- ret = zfcp_scan_get_nameserver(adapter);
+ ret = zfcp_wka_port_get(&adapter->nsp);
if (ret)
return ret;
gpn_ft = zfcp_alloc_sg_env();
- if (!gpn_ft)
- return -ENOMEM;
+ if (!gpn_ft) {
+ ret = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < 3; i++) {
ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
@@ -547,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
}
zfcp_free_sg_env(gpn_ft);
-
+out:
+ zfcp_wka_port_put(&adapter->nsp);
return ret;
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 49dbeb754e5..739356a5c12 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -50,19 +50,16 @@ static u32 fsf_qtcb_type[] = {
[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};
-static const char *zfcp_act_subtable_type[] = {
- "unknown", "OS", "WWPN", "DID", "LUN"
-};
-
static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
{
u16 subtable = table >> 16;
u16 rule = table & 0xffff;
+ const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
- if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type))
+ if (subtable && subtable < ARRAY_SIZE(act_type))
dev_warn(&adapter->ccw_device->dev,
- "Access denied in subtable %s, rule %d.\n",
- zfcp_act_subtable_type[subtable], rule);
+ "Access denied according to ACT rule type %s, "
+ "rule %d\n", act_type[subtable], rule);
}
static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
@@ -70,8 +67,8 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
{
struct fsf_qtcb_header *header = &req->qtcb->header;
dev_warn(&req->adapter->ccw_device->dev,
- "Access denied, cannot send command to port 0x%016Lx.\n",
- port->wwpn);
+ "Access denied to port 0x%016Lx\n",
+ (unsigned long long)port->wwpn);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
zfcp_erp_port_access_denied(port, 55, req);
@@ -83,8 +80,9 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
{
struct fsf_qtcb_header *header = &req->qtcb->header;
dev_warn(&req->adapter->ccw_device->dev,
- "Access denied for unit 0x%016Lx on port 0x%016Lx.\n",
- unit->fcp_lun, unit->port->wwpn);
+ "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
zfcp_erp_unit_access_denied(unit, 59, req);
@@ -93,9 +91,8 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
- dev_err(&req->adapter->ccw_device->dev,
- "Required FC class not supported by adapter, "
- "shutting down adapter.\n");
+ dev_err(&req->adapter->ccw_device->dev, "FCP device not "
+ "operational because of an unsupported FC class\n");
zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}
@@ -171,42 +168,6 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&zfcp_data.config_lock, flags);
}
-static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
-{
- struct zfcp_adapter *adapter = req->adapter;
- struct fsf_status_read_buffer *sr_buf = req->data;
- struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
-
- dev_warn(&adapter->ccw_device->dev,
- "Warning: bit error threshold data "
- "received for the adapter: "
- "link failures = %i, loss of sync errors = %i, "
- "loss of signal errors = %i, "
- "primitive sequence errors = %i, "
- "invalid transmission word errors = %i, "
- "CRC errors = %i).\n",
- err->link_failure_error_count,
- err->loss_of_sync_error_count,
- err->loss_of_signal_error_count,
- err->primitive_sequence_error_count,
- err->invalid_transmission_word_error_count,
- err->crc_error_count);
- dev_warn(&adapter->ccw_device->dev,
- "Additional bit error threshold data of the adapter: "
- "primitive sequence event time-outs = %i, "
- "elastic buffer overrun errors = %i, "
- "advertised receive buffer-to-buffer credit = %i, "
- "current receice buffer-to-buffer credit = %i, "
- "advertised transmit buffer-to-buffer credit = %i, "
- "current transmit buffer-to-buffer credit = %i).\n",
- err->primitive_sequence_event_timeout_count,
- err->elastic_buffer_overrun_error_count,
- err->advertised_receive_b2b_credit,
- err->current_receive_b2b_credit,
- err->advertised_transmit_b2b_credit,
- err->current_transmit_b2b_credit);
-}
-
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
struct fsf_link_down_info *link_down)
{
@@ -223,62 +184,66 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
switch (link_down->error_code) {
case FSF_PSQ_LINK_NO_LIGHT:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: no light detected.\n");
+ "There is no light signal from the local "
+ "fibre channel cable\n");
break;
case FSF_PSQ_LINK_WRAP_PLUG:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: wrap plug detected.\n");
+ "There is a wrap plug instead of a fibre "
+ "channel cable\n");
break;
case FSF_PSQ_LINK_NO_FCP:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "adjacent node on link does not support FCP.\n");
+ "The adjacent fibre channel node does not "
+ "support FCP\n");
break;
case FSF_PSQ_LINK_FIRMWARE_UPDATE:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "firmware update in progress.\n");
+ "The FCP device is suspended because of a "
+ "firmware update\n");
break;
case FSF_PSQ_LINK_INVALID_WWPN:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "duplicate or invalid WWPN detected.\n");
+ "The FCP device detected a WWPN that is "
+ "duplicate or not valid\n");
break;
case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "no support for NPIV by Fabric.\n");
+ "The fibre channel fabric does not support NPIV\n");
break;
case FSF_PSQ_LINK_NO_FCP_RESOURCES:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "out of resource in FCP daughtercard.\n");
+ "The FCP adapter cannot support more NPIV ports\n");
break;
case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "out of resource in Fabric.\n");
+ "The adjacent switch cannot support "
+ "more NPIV ports\n");
break;
case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link is down: "
- "unable to login to Fabric.\n");
+ "The FCP adapter could not log in to the "
+ "fibre channel fabric\n");
break;
case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
dev_warn(&req->adapter->ccw_device->dev,
- "WWPN assignment file corrupted on adapter.\n");
+ "The WWPN assignment file on the FCP adapter "
+ "has been damaged\n");
break;
case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
dev_warn(&req->adapter->ccw_device->dev,
- "Mode table corrupted on adapter.\n");
+ "The mode table on the FCP adapter "
+ "has been damaged\n");
break;
case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
dev_warn(&req->adapter->ccw_device->dev,
- "No WWPN for assignment table on adapter.\n");
+ "All NPIV ports on the FCP adapter have "
+ "been assigned\n");
break;
default:
dev_warn(&req->adapter->ccw_device->dev,
- "The local link to adapter is down.\n");
+ "The link between the FCP adapter and "
+ "the FC fabric is down\n");
}
out:
zfcp_erp_adapter_failed(adapter, id, req);
@@ -286,27 +251,18 @@ out:
static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
{
- struct zfcp_adapter *adapter = req->adapter;
struct fsf_status_read_buffer *sr_buf = req->data;
struct fsf_link_down_info *ldi =
(struct fsf_link_down_info *) &sr_buf->payload;
switch (sr_buf->status_subtype) {
case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
- dev_warn(&adapter->ccw_device->dev,
- "Physical link is down.\n");
zfcp_fsf_link_down_info_eval(req, 38, ldi);
break;
case FSF_STATUS_READ_SUB_FDISC_FAILED:
- dev_warn(&adapter->ccw_device->dev,
- "Local link is down "
- "due to failed FDISC login.\n");
zfcp_fsf_link_down_info_eval(req, 39, ldi);
break;
case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
- dev_warn(&adapter->ccw_device->dev,
- "Local link is down "
- "due to firmware update on adapter.\n");
zfcp_fsf_link_down_info_eval(req, 40, NULL);
};
}
@@ -335,14 +291,17 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- zfcp_fsf_bit_error_threshold(req);
+ dev_warn(&adapter->ccw_device->dev,
+ "The error threshold for checksum statistics "
+ "has been exceeded\n");
+ zfcp_hba_dbf_event_berr(adapter, req);
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
break;
case FSF_STATUS_READ_LINK_UP:
dev_info(&adapter->ccw_device->dev,
- "Local link was replugged.\n");
+ "The local link has been restored\n");
/* All ports should be marked as ready to run again */
zfcp_erp_modify_adapter_status(adapter, 30, NULL,
ZFCP_STATUS_COMMON_RUNNING,
@@ -370,7 +329,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
- schedule_work(&adapter->stat_work);
+ queue_work(zfcp_data.work_queue, &adapter->stat_work);
}
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -386,8 +345,8 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
break;
case FSF_SQ_NO_RECOM:
dev_err(&req->adapter->ccw_device->dev,
- "No recommendation could be given for a "
- "problem on the adapter.\n");
+ "The FCP adapter reported a problem "
+ "that cannot be recovered\n");
zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
break;
}
@@ -403,8 +362,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status) {
case FSF_UNKNOWN_COMMAND:
dev_err(&req->adapter->ccw_device->dev,
- "Command issued by the device driver (0x%x) is "
- "not known by the adapter.\n",
+ "The FCP adapter does not recognize the command 0x%x\n",
req->qtcb->header.fsf_command);
zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -435,11 +393,9 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
return;
case FSF_PROT_QTCB_VERSION_ERROR:
dev_err(&adapter->ccw_device->dev,
- "The QTCB version requested by zfcp (0x%x) is not "
- "supported by the FCP adapter (lowest supported "
- "0x%x, highest supported 0x%x).\n",
- FSF_QTCB_CURRENT_VERSION, psq->word[0],
- psq->word[1]);
+ "QTCB version 0x%x not supported by FCP adapter "
+ "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
+ psq->word[0], psq->word[1]);
zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
break;
case FSF_PROT_ERROR_STATE:
@@ -449,8 +405,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
break;
case FSF_PROT_UNSUPP_QTCB_TYPE:
dev_err(&adapter->ccw_device->dev,
- "Packet header type used by the device driver is "
- "incompatible with that used on the adapter.\n");
+ "The QTCB type is not supported by the FCP adapter\n");
zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING:
@@ -459,7 +414,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
break;
case FSF_PROT_DUPLICATE_REQUEST_ID:
dev_err(&adapter->ccw_device->dev,
- "The request identifier 0x%Lx is ambiguous.\n",
+ "0x%Lx is an ambiguous request identifier\n",
(unsigned long long)qtcb->bottom.support.req_handle);
zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
break;
@@ -479,9 +434,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
break;
default:
dev_err(&adapter->ccw_device->dev,
- "Transfer protocol status information"
- "provided by the adapter (0x%x) "
- "is not compatible with the device driver.\n",
+ "0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
}
@@ -559,33 +512,17 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
adapter->peer_wwpn = bottom->plogi_payload.wwpn;
adapter->peer_wwnn = bottom->plogi_payload.wwnn;
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
- if (req->erp_action)
- dev_info(&adapter->ccw_device->dev,
- "Point-to-Point fibrechannel "
- "configuration detected.\n");
break;
case FSF_TOPO_FABRIC:
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
- if (req->erp_action)
- dev_info(&adapter->ccw_device->dev,
- "Switched fabric fibrechannel "
- "network detected.\n");
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
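+		/* fall through */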
- dev_err(&adapter->ccw_device->dev,
- "Unsupported arbitrated loop fibrechannel "
- "topology detected, shutting down "
- "adapter.\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
- return -EIO;
default:
- fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
dev_err(&adapter->ccw_device->dev,
- "The fibrechannel topology reported by the"
- " adapter is not known by the zfcp driver,"
- " shutting down adapter.\n");
- zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
+ "Unknown or unsupported arbitrated loop "
+ "fibre channel topology detected\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
return -EIO;
}
@@ -616,11 +553,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
dev_err(&adapter->ccw_device->dev,
- "Maximum QTCB size (%d bytes) allowed by "
- "the adapter is lower than the minimum "
- "required by the driver (%ld bytes).\n",
- bottom->max_qtcb_size,
- sizeof(struct fsf_qtcb));
+ "FCP adapter maximum QTCB size (%d bytes) "
+ "is too small\n",
+ bottom->max_qtcb_size);
zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
return;
}
@@ -656,15 +591,15 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
- "The adapter only supports newer control block "
- "versions, try updated device driver.\n");
+ "The FCP adapter only supports newer "
+ "control block versions\n");
zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
return;
}
if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
- "The adapter only supports older control block "
- "versions, consider a microcode upgrade.\n");
+ "The FCP adapter only supports older "
+ "control block versions\n");
zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
}
}
@@ -688,7 +623,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
- struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb *qtcb = req->qtcb;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -697,38 +631,47 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
zfcp_fsf_exchange_port_evaluate(req);
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
zfcp_fsf_exchange_port_evaluate(req);
- atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
zfcp_fsf_link_down_info_eval(req, 43,
&qtcb->header.fsf_status_qual.link_down_info);
break;
}
}
-static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue)
+static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
{
- spin_lock_bh(&queue->lock);
- if (atomic_read(&queue->count))
+ struct zfcp_qdio_queue *req_q = &adapter->req_q;
+
+ spin_lock_bh(&adapter->req_q_lock);
+ if (atomic_read(&req_q->count))
return 1;
- spin_unlock_bh(&queue->lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return 0;
}
+static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
+{
+ unsigned int count = atomic_read(&adapter->req_q.count);
+ if (!count)
+ atomic_inc(&adapter->qdio_outb_full);
+ return count > 0;
+}
+
static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
{
long ret;
- struct zfcp_qdio_queue *req_q = &adapter->req_q;
- spin_unlock_bh(&req_q->lock);
+ spin_unlock_bh(&adapter->req_q_lock);
ret = wait_event_interruptible_timeout(adapter->request_wq,
- zfcp_fsf_sbal_check(req_q), 5 * HZ);
+ zfcp_fsf_sbal_check(adapter), 5 * HZ);
if (ret > 0)
return 0;
+ if (!ret)
+ atomic_inc(&adapter->qdio_outb_full);
- spin_lock_bh(&req_q->lock);
+ spin_lock_bh(&adapter->req_q_lock);
return -EIO;
}
@@ -765,7 +708,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
u32 fsf_cmd, int req_flags,
mempool_t *pool)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_qdio_queue *req_q = &adapter->req_q;
@@ -867,10 +810,10 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -910,7 +853,7 @@ failed_buf:
zfcp_fsf_req_free(req);
zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -980,11 +923,11 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
struct zfcp_unit *unit,
int req_flags)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
- spin_lock(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
req_flags, adapter->pool.fsf_req_abort);
@@ -1013,7 +956,7 @@ out_error_free:
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock(&adapter->req_q.lock);
+ spin_unlock(&adapter->req_q_lock);
return req;
}
@@ -1021,7 +964,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_send_ct *send_ct = req->data;
- struct zfcp_port *port = send_ct->port;
struct fsf_qtcb_header *header = &req->qtcb->header;
send_ct->status = -EINVAL;
@@ -1040,17 +982,14 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]){
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- zfcp_test_link(port);
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
break;
case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_port(req, port);
break;
case FSF_PORT_BOXED:
- zfcp_erp_port_boxed(port, 49, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR |
ZFCP_STATUS_FSFREQ_RETRY;
break;
@@ -1101,12 +1040,12 @@ static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
struct zfcp_erp_action *erp_action)
{
- struct zfcp_port *port = ct->port;
- struct zfcp_adapter *adapter = port->adapter;
+ struct zfcp_wka_port *wka_port = ct->wka_port;
+ struct zfcp_adapter *adapter = wka_port->adapter;
struct zfcp_fsf_req *req;
int ret = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1123,7 +1062,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
goto failed_send;
req->handler = zfcp_fsf_send_ct_handler;
- req->qtcb->header.port_handle = port->handle;
+ req->qtcb->header.port_handle = wka_port->handle;
req->qtcb->bottom.support.service_class = FSF_CLASS_3;
req->qtcb->bottom.support.timeout = ct->timeout;
req->data = ct;
@@ -1148,7 +1087,7 @@ failed_send:
if (erp_action)
erp_action->fsf_req = NULL;
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return ret;
}
@@ -1218,8 +1157,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
- spin_lock(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
ZFCP_REQ_AUTO_CLEANUP, NULL);
@@ -1228,8 +1167,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
goto out;
}
- ret = zfcp_fsf_setup_sbals(req, els->req, els->resp,
- FSF_MAX_SBALS_PER_ELS_REQ);
+ ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
+
if (ret)
goto failed_send;
@@ -1252,19 +1191,19 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
failed_send:
zfcp_fsf_req_free(req);
out:
- spin_unlock(&adapter->req_q.lock);
+ spin_unlock(&adapter->req_q_lock);
return ret;
}
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_adapter *adapter = erp_action->adapter;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter,
FSF_QTCB_EXCHANGE_CONFIG_DATA,
@@ -1295,18 +1234,18 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
struct fsf_qtcb_bottom_config *data)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1334,7 +1273,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
if (!retval)
wait_event(req->completion_wq,
req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1351,7 +1290,7 @@ out:
*/
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req;
struct zfcp_adapter *adapter = erp_action->adapter;
int retval = -EIO;
@@ -1359,8 +1298,8 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
ZFCP_REQ_AUTO_CLEANUP,
@@ -1385,7 +1324,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -1398,15 +1337,15 @@ out:
int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
struct fsf_qtcb_bottom_port *data)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
return -EOPNOTSUPP;
- spin_lock_bh(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock_bh(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
@@ -1427,7 +1366,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
if (!retval)
wait_event(req->completion_wq,
req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1443,7 +1382,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
struct fsf_plogi *plogi;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- goto skip_fsfstatus;
+ return;
switch (header->fsf_status) {
case FSF_PORT_ALREADY_OPEN:
@@ -1453,9 +1392,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break;
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
- "The adapter is out of resources. The remote port "
- "0x%016Lx could not be opened, disabling it.\n",
- port->wwpn);
+ "Not enough FCP adapter resources to open "
+ "remote port 0x%016Lx\n",
+ (unsigned long long)port->wwpn);
zfcp_erp_port_failed(port, 31, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1467,8 +1406,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break;
case FSF_SQ_NO_RETRY_POSSIBLE:
dev_warn(&req->adapter->ccw_device->dev,
- "The remote port 0x%016Lx could not be "
- "opened. Disabling it.\n", port->wwpn);
+ "Remote port 0x%016Lx could not be opened\n",
+ (unsigned long long)port->wwpn);
zfcp_erp_port_failed(port, 32, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1496,9 +1435,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
* another GID_PN straight after a port has been opened.
* Alternately, an ADISC/PDISC ELS should suffice, as well.
*/
- if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
- break;
-
plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
if (plogi->serv_param.wwpn != port->wwpn)
@@ -1514,9 +1450,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
}
-
-skip_fsfstatus:
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
}
/**
@@ -1526,12 +1459,12 @@ skip_fsfstatus:
*/
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1553,7 +1486,6 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
req->data = erp_action->port;
req->erp_action = erp_action;
erp_action->fsf_req = req;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
@@ -1562,7 +1494,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -1571,7 +1503,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
struct zfcp_port *port = req->data;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- goto skip_fsfstatus;
+ return;
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
@@ -1586,9 +1518,6 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
ZFCP_CLEAR);
break;
}
-
-skip_fsfstatus:
- atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
}
/**
@@ -1598,12 +1527,12 @@ skip_fsfstatus:
*/
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1624,7 +1553,6 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
erp_action->fsf_req = req;
- atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
@@ -1633,7 +1561,131 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
+ return retval;
+}
+
+static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
+{
+ struct zfcp_wka_port *wka_port = req->data;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ goto out;
+ }
+
+ switch (header->fsf_status) {
+ case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
+ dev_warn(&req->adapter->ccw_device->dev,
+ "Opening WKA port 0x%x failed\n", wka_port->d_id);
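+		/* fall through */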
+ case FSF_ADAPTER_STATUS_AVAILABLE:
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
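+		/* fall through */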
+ case FSF_ACCESS_DENIED:
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ break;
+ case FSF_PORT_ALREADY_OPEN:
+ case FSF_GOOD:
+ wka_port->handle = header->port_handle;
+ wka_port->status = ZFCP_WKA_PORT_ONLINE;
+ }
+out:
+ wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_open_wka_port - create and send open wka-port request
+ * @wka_port: pointer to struct zfcp_wka_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
+{
+ struct qdio_buffer_element *sbale;
+ struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_bh(&adapter->req_q_lock);
+ if (zfcp_fsf_req_sbal_get(adapter))
+ goto out;
+
+ req = zfcp_fsf_req_create(adapter,
+ FSF_QTCB_OPEN_PORT_WITH_DID,
+ ZFCP_REQ_AUTO_CLEANUP,
+ adapter->pool.fsf_req_erp);
+ if (unlikely(IS_ERR(req))) {
+ retval = PTR_ERR(req);
+ goto out;
+ }
+
+ sbale = zfcp_qdio_sbale_req(req);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ req->handler = zfcp_fsf_open_wka_port_handler;
+ req->qtcb->bottom.support.d_id = wka_port->d_id;
+ req->data = wka_port;
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
+out:
+ spin_unlock_bh(&adapter->req_q_lock);
+ return retval;
+}
+
+static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
+{
+ struct zfcp_wka_port *wka_port = req->data;
+
+ if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
+ }
+
+ wka_port->status = ZFCP_WKA_PORT_OFFLINE;
+ wake_up(&wka_port->completion_wq);
+}
+
+/**
+ * zfcp_fsf_close_wka_port - create and send close wka port request
+ * @wka_port: pointer to struct zfcp_wka_port
+ * Returns: 0 on success, error otherwise
+ */
+int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
+{
+ struct qdio_buffer_element *sbale;
+ struct zfcp_adapter *adapter = wka_port->adapter;
+ struct zfcp_fsf_req *req;
+ int retval = -EIO;
+
+ spin_lock_bh(&adapter->req_q_lock);
+ if (zfcp_fsf_req_sbal_get(adapter))
+ goto out;
+
+ req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
+ ZFCP_REQ_AUTO_CLEANUP,
+ adapter->pool.fsf_req_erp);
+ if (unlikely(IS_ERR(req))) {
+ retval = PTR_ERR(req);
+ goto out;
+ }
+
+ sbale = zfcp_qdio_sbale_req(req);
+ sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
+ sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
+
+ req->handler = zfcp_fsf_close_wka_port_handler;
+ req->data = wka_port;
+ req->qtcb->header.port_handle = wka_port->handle;
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
+out:
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -1695,12 +1747,12 @@ skip_fsfstatus:
*/
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1731,7 +1783,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -1746,7 +1798,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
int exclusive, readwrite;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- goto skip_fsfstatus;
+ return;
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -1774,14 +1826,12 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
case FSF_LUN_SHARING_VIOLATION:
if (header->fsf_status_qual.word[0])
dev_warn(&adapter->ccw_device->dev,
- "FCP-LUN 0x%Lx at the remote port "
- "with WWPN 0x%Lx "
- "connected to the adapter "
- "is already in use in LPAR%d, CSS%d.\n",
- unit->fcp_lun,
- unit->port->wwpn,
- queue_designator->hla,
- queue_designator->cssid);
+ "LUN 0x%Lx on port 0x%Lx is already in "
+ "use by CSS%d, MIF Image ID %x\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn,
+ queue_designator->cssid,
+ queue_designator->hla);
else
zfcp_act_eval_err(adapter,
header->fsf_status_qual.word[2]);
@@ -1792,9 +1842,10 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
break;
case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
dev_warn(&adapter->ccw_device->dev,
- "The adapter ran out of resources. There is no "
- "handle available for unit 0x%016Lx on port 0x%016Lx.",
- unit->fcp_lun, unit->port->wwpn);
+ "No handle is available for LUN "
+ "0x%016Lx on port 0x%016Lx\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 34, req);
/* fall through */
case FSF_INVALID_COMMAND_OPTION:
@@ -1831,26 +1882,29 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
&unit->status);
dev_info(&adapter->ccw_device->dev,
- "Read-only access for unit 0x%016Lx "
- "on port 0x%016Lx.\n",
- unit->fcp_lun, unit->port->wwpn);
+ "SCSI device at LUN 0x%016Lx on port "
+ "0x%016Lx opened read-only\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
}
if (exclusive && !readwrite) {
dev_err(&adapter->ccw_device->dev,
- "Exclusive access of read-only unit "
- "0x%016Lx on port 0x%016Lx not "
- "supported, disabling unit.\n",
- unit->fcp_lun, unit->port->wwpn);
+ "Exclusive read-only access not "
+ "supported (unit 0x%016Lx, "
+ "port 0x%016Lx)\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 35, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
zfcp_erp_unit_shutdown(unit, 0, 80, req);
} else if (!exclusive && readwrite) {
dev_err(&adapter->ccw_device->dev,
- "Shared access of read-write unit "
- "0x%016Lx on port 0x%016Lx not "
- "supported, disabling unit.\n",
- unit->fcp_lun, unit->port->wwpn);
+ "Shared read-write access not "
+ "supported (unit 0x%016Lx, port "
+				"0x%016Lx)\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_unit_failed(unit, 36, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
zfcp_erp_unit_shutdown(unit, 0, 81, req);
@@ -1858,9 +1912,6 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
}
break;
}
-
-skip_fsfstatus:
- atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
}
/**
@@ -1870,12 +1921,12 @@ skip_fsfstatus:
*/
int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -1901,8 +1952,6 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
- atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
-
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
@@ -1910,7 +1959,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -1919,7 +1968,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
struct zfcp_unit *unit = req->data;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
- goto skip_fsfstatus;
+ return;
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
@@ -1949,8 +1998,6 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
break;
}
-skip_fsfstatus:
- atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
}
/**
@@ -1960,12 +2007,12 @@ skip_fsfstatus:
*/
int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_adapter *adapter = erp_action->adapter;
struct zfcp_fsf_req *req;
int retval = -EIO;
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
@@ -1986,7 +2033,6 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
req->data = erp_action->unit;
req->erp_action = erp_action;
erp_action->fsf_req = req;
- atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
@@ -1995,7 +2041,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
erp_action->fsf_req = NULL;
}
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
return retval;
}
@@ -2156,21 +2202,21 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
break;
case FSF_DIRECTION_INDICATOR_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "Invalid data direction (%d) given for unit "
- "0x%016Lx on port 0x%016Lx, shutting down "
- "adapter.\n",
+ "Incorrect direction %d, unit 0x%016Lx on port "
+ "0x%016Lx closed\n",
req->qtcb->bottom.io.data_direction,
- unit->fcp_lun, unit->port->wwpn);
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
case FSF_CMND_LENGTH_NOT_VALID:
dev_err(&req->adapter->ccw_device->dev,
- "An invalid control-data-block length field (%d) "
- "was found in a command for unit 0x%016Lx on port "
- "0x%016Lx. Shutting down adapter.\n",
+ "Incorrect CDB length %d, unit 0x%016Lx on "
+ "port 0x%016Lx closed\n",
req->qtcb->bottom.io.fcp_cmnd_length,
- unit->fcp_lun, unit->port->wwpn);
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2201,6 +2247,20 @@ skip_fsfstatus:
}
}
+static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
+{
+ u32 *fcp_dl_ptr;
+
+ /*
+ * fcp_dl_addr = start address of fcp_cmnd structure +
+ * size of fixed part + size of dynamically sized add_dcp_cdb field
+	 * size of fixed part + size of dynamically sized add_fcp_cdb field
+ */
+ fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
+ (fcp_cmd->add_fcp_cdb_length << 2));
+ *fcp_dl_ptr = fcp_dl;
+}
+
/**
* zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
* @adapter: adapter where scsi command is issued
@@ -2223,8 +2283,8 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;
- spin_lock(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
adapter->pool.fsf_req_scsi);
@@ -2286,7 +2346,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t);
+ fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
scsi_sglist(scsi_cmnd),
@@ -2296,10 +2356,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
retval = -EIO;
else {
dev_err(&adapter->ccw_device->dev,
- "SCSI request too large. "
- "Shutting down unit 0x%016Lx on port "
- "0x%016Lx.\n", unit->fcp_lun,
- unit->port->wwpn);
+ "Oversize data package, unit 0x%016Lx "
+ "on port 0x%016Lx closed\n",
+ (unsigned long long)unit->fcp_lun,
+ (unsigned long long)unit->port->wwpn);
zfcp_erp_unit_shutdown(unit, 0, 131, req);
retval = -EINVAL;
}
@@ -2322,7 +2382,7 @@ failed_scsi_cmnd:
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
- spin_unlock(&adapter->req_q.lock);
+ spin_unlock(&adapter->req_q_lock);
return retval;
}
@@ -2338,7 +2398,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
struct zfcp_unit *unit,
u8 tm_flags, int req_flags)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fcp_cmnd_iu *fcp_cmnd_iu;
@@ -2346,8 +2406,8 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
ZFCP_STATUS_COMMON_UNBLOCKED)))
return NULL;
- spin_lock(&adapter->req_q.lock);
- if (!atomic_read(&adapter->req_q.count))
+ spin_lock(&adapter->req_q_lock);
+ if (!zfcp_fsf_sbal_available(adapter))
goto out;
req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
adapter->pool.fsf_req_scsi);
@@ -2362,7 +2422,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
- sizeof(fcp_dl_t);
+ sizeof(u32);
sbale = zfcp_qdio_sbale_req(req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
@@ -2379,7 +2439,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
zfcp_fsf_req_free(req);
req = NULL;
out:
- spin_unlock(&adapter->req_q.lock);
+ spin_unlock(&adapter->req_q_lock);
return req;
}
@@ -2398,7 +2458,7 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
struct zfcp_fsf_cfdc *fsf_cfdc)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *req = NULL;
struct fsf_qtcb_bottom_support *bottom;
int direction, retval = -EIO, bytes;
@@ -2417,7 +2477,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
return ERR_PTR(-EINVAL);
}
- spin_lock_bh(&adapter->req_q.lock);
+ spin_lock_bh(&adapter->req_q_lock);
if (zfcp_fsf_req_sbal_get(adapter))
goto out;
@@ -2447,7 +2507,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
out:
- spin_unlock_bh(&adapter->req_q.lock);
+ spin_unlock_bh(&adapter->req_q_lock);
if (!retval) {
wait_event(req->completion_wq,
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index bf94b4da076..fd3a88777ac 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -71,13 +71,6 @@
#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
#define FSF_ELS_COMMAND_REJECTED 0x00000050
#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
-#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
-#define FSF_AUTHORIZATION_FAILURE 0x00000053
-#define FSF_CFDC_ERROR_DETECTED 0x00000054
-#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
-#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
-#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
-#define FSF_CONFLICTS_OVERRULED 0x00000058
#define FSF_PORT_BOXED 0x00000059
#define FSF_LUN_BOXED 0x0000005A
#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
@@ -85,9 +78,7 @@
#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
#define FSF_SBAL_MISMATCH 0x00000063
-#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
-#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
@@ -102,20 +93,9 @@
#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
-#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
#define FSF_SQ_COMMAND_ABORTED 0x06
#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
-/* FSF status qualifier for CFDC commands */
-#define FSF_SQ_CFDC_HARDENED_ON_SE 0x00000000
-#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
-#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
-/* CFDC subtable codes */
-#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
-#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
-#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
-#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
-
/* FSF status qualifier (most significant 4 bytes), local link down */
#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
@@ -145,7 +125,6 @@
#define FSF_STATUS_READ_LINK_UP 0x00000006
#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
-#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
/* status subtypes in status read buffer */
@@ -159,20 +138,9 @@
/* status subtypes for unsolicited status notification lost */
#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
-#define FSF_STATUS_READ_SUB_SENSE_DATA 0x00000002
-#define FSF_STATUS_READ_SUB_LINK_STATUS 0x00000004
-#define FSF_STATUS_READ_SUB_PORT_CLOSED 0x00000008
-#define FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD 0x00000010
#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
-#define FSF_STATUS_READ_SUB_ACT_HARDENED 0x00000040
-#define FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT 0x00000080
-
-/* status subtypes for CFDC */
-#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
-#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
 /* topology that is detected by the adapter */
-#define FSF_TOPO_ERROR 0x00000000
#define FSF_TOPO_P2P 0x00000001
#define FSF_TOPO_FABRIC 0x00000002
#define FSF_TOPO_AL 0x00000003
@@ -180,17 +148,13 @@
/* data direction for FCP commands */
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
-#define FSF_DATADIR_READ_WRITE 0x00000003
#define FSF_DATADIR_CMND 0x00000004
/* fc service class */
-#define FSF_CLASS_1 0x00000001
-#define FSF_CLASS_2 0x00000002
#define FSF_CLASS_3 0x00000003
/* SBAL chaining */
#define FSF_MAX_SBALS_PER_REQ 36
-#define FSF_MAX_SBALS_PER_ELS_REQ 2
/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE 1024
@@ -200,50 +164,16 @@
#define FSF_FEATURE_LUN_SHARING 0x00000004
#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
-#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
/* host connection features */
#define FSF_FEATURE_NPIV_MODE 0x00000001
-#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
/* option */
#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
-#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
-
-/* adapter types */
-#define FSF_ADAPTER_TYPE_FICON 0x00000001
-#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
-
-/* port types */
-#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
-#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
-#define FSF_HBA_PORTTYPE_NPORT 0x00000005
-#define FSF_HBA_PORTTYPE_PTP 0x00000021
-/* following are not defined and used by FSF Spec
- but are additionally defined by FC-HBA */
-#define FSF_HBA_PORTTYPE_OTHER 0x00000002
-#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
-#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
-#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
-#define FSF_HBA_PORTTYPE_FPORT 0x00000008
-#define FSF_HBA_PORTTYPE_LPORT 0x00000020
-
-/* port states */
-#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
-#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
-#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
-#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
-#define FSF_HBA_PORTSTATE_ERROR 0x00000007
-
-/* IO states of adapter */
-#define FSF_IOSTAT_NPORT_RJT 0x00000004
-#define FSF_IOSTAT_FABRIC_RJT 0x00000005
-#define FSF_IOSTAT_LS_RJT 0x00000009
/* open LUN access flags*/
-#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
@@ -265,11 +195,6 @@ struct fsf_queue_designator {
u32 res1;
} __attribute__ ((packed));
-struct fsf_port_closed_payload {
- struct fsf_queue_designator queue_designator;
- u32 port_handle;
-} __attribute__ ((packed));
-
struct fsf_bit_error_payload {
u32 res1;
u32 link_failure_error_count;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 69d632d851d..3e05080e62d 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -28,7 +28,7 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
-static volatile struct qdio_buffer_element *
+static struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
return &q->sbal[sbal_idx]->element[sbale_idx];
@@ -57,7 +57,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
{
- dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n");
+ dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
@@ -145,7 +145,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->resp_q;
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
if (unlikely(qdio_err)) {
@@ -174,8 +174,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
dev_warn(&adapter->ccw_device->dev,
- "Protocol violation by adapter. "
- "Continuing operations.\n");
+ "A QDIO protocol error occurred, "
+ "operations continue\n");
}
/*
@@ -190,8 +190,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
* @fsf_req: pointer to struct fsf_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
-volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
+struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
}
@@ -201,8 +200,7 @@ zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
* @fsf_req: pointer to struct fsf_req
* Returns: pointer to qdio_buffer_element (SBALE) structure
*/
-volatile struct qdio_buffer_element *
-zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
+struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
{
return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
req->sbale_curr);
@@ -216,10 +214,10 @@ static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
% QDIO_MAX_BUFFERS_PER_Q;
}
-static volatile struct qdio_buffer_element *
+static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
/* set last entry flag in current SBALE of current SBAL */
sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -250,7 +248,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
return sbale;
}
-static volatile struct qdio_buffer_element *
+static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -273,7 +271,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
unsigned long remaining, length;
void *addr;
@@ -282,6 +280,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
addr += length, remaining -= length) {
sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
if (!sbale) {
+ atomic_inc(&fsf_req->adapter->qdio_outb_full);
zfcp_qdio_undo_sbals(fsf_req);
return -EINVAL;
}
@@ -307,7 +306,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
struct scatterlist *sg, int max_sbals)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
int retval, bytes = 0;
/* figure out last allowed SBAL */
@@ -344,10 +343,10 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
int first = fsf_req->sbal_first;
int count = fsf_req->sbal_number;
int retval, pci, pci_batch;
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
/* acknowledgements for transferred buffers */
- pci_batch = req_q->pci_batch + count;
+ pci_batch = adapter->req_q_pci_batch + count;
if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
pci = first + count - (pci_batch + 1);
@@ -367,7 +366,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
atomic_sub(count, &req_q->count);
req_q->first += count;
req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
- req_q->pci_batch = pci_batch;
+ adapter->req_q_pci_batch = pci_batch;
return 0;
}
@@ -418,14 +417,14 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
struct zfcp_qdio_queue *req_q;
int first, count;
- if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
req_q = &adapter->req_q;
- spin_lock_bh(&req_q->lock);
+ spin_lock_bh(&adapter->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
- spin_unlock_bh(&req_q->lock);
+ spin_unlock_bh(&adapter->req_q_lock);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
@@ -438,7 +437,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
}
req_q->first = 0;
atomic_set(&req_q->count, 0);
- req_q->pci_batch = 0;
+ adapter->req_q_pci_batch = 0;
adapter->resp_q.first = 0;
atomic_set(&adapter->resp_q.count, 0);
}
@@ -450,23 +449,17 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
*/
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
- volatile struct qdio_buffer_element *sbale;
+ struct qdio_buffer_element *sbale;
int cc;
- if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
+ if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;
- if (qdio_establish(&adapter->qdio_init_data)) {
- dev_err(&adapter->ccw_device->dev,
- "Establish of QDIO queues failed.\n");
- return -EIO;
- }
+ if (qdio_establish(&adapter->qdio_init_data))
+ goto failed_establish;
- if (qdio_activate(adapter->ccw_device)) {
- dev_err(&adapter->ccw_device->dev,
- "Activate of QDIO queues failed.\n");
+ if (qdio_activate(adapter->ccw_device))
goto failed_qdio;
- }
for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(adapter->resp_q.sbal[cc]->element[0]);
@@ -476,20 +469,20 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
- QDIO_MAX_BUFFERS_PER_Q)) {
- dev_err(&adapter->ccw_device->dev,
- "Init of QDIO response queue failed.\n");
+ QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;
- }
 /* set index of first available SBALS / number of available SBALS */
adapter->req_q.first = 0;
atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
- adapter->req_q.pci_batch = 0;
+ adapter->req_q_pci_batch = 0;
return 0;
failed_qdio:
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+failed_establish:
+ dev_err(&adapter->ccw_device->dev,
+ "Setting up the QDIO connection to the FCP adapter failed\n");
return -EIO;
}
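
Illustrative note (not part of the patch): the zfcp_qdio.c hunks above drop the
s390-only atomic_test_mask() helper in favour of a plain atomic_read() plus
bitmask test. A minimal sketch of the pattern; the helper name is hypothetical.

static int zfcp_qdio_is_up(struct zfcp_adapter *adapter)
{
        /* old: atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status) */
        return (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) != 0;
}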
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index aeae56b00b4..ca8f85f3dad 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -21,20 +21,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
return fcp_sns_info_ptr;
}
-void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
-{
- fcp_dl_t *fcp_dl_ptr;
-
- /*
- * fcp_dl_addr = start address of fcp_cmnd structure +
- * size of fixed part + size of dynamically sized add_dcp_cdb field
- * SEE FCP-2 documentation
- */
- fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
- (fcp_cmd->add_fcp_cdb_length << 2));
- *fcp_dl_ptr = fcp_dl;
-}
-
static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -119,13 +105,17 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
{
struct zfcp_port *port;
struct zfcp_unit *unit;
+ int scsi_lun;
list_for_each_entry(port, &adapter->port_list_head, list) {
if (!port->rport || (id != port->rport->scsi_target_id))
continue;
- list_for_each_entry(unit, &port->unit_list_head, list)
- if (lun == unit->scsi_lun)
+ list_for_each_entry(unit, &port->unit_list_head, list) {
+ scsi_lun = scsilun_to_int(
+ (struct scsi_lun *)&unit->fcp_lun);
+ if (lun == scsi_lun)
return unit;
+ }
}
return NULL;
@@ -183,7 +173,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
return retval;
}
fsf_req->data = NULL;
- fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
/* don't access old fsf_req after releasing the abort_lock */
write_unlock_irqrestore(&adapter->abort_lock, flags);
@@ -294,7 +283,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
- "registration with SCSI stack failed.");
+ "Registering the FCP device with the "
+ "SCSI stack failed\n");
return -EIO;
}
@@ -312,7 +302,6 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
scsi_host_put(adapter->scsi_host);
return -EIO;
}
- atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
return 0;
}
@@ -336,7 +325,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
- atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
return;
}
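
Illustrative note (not part of the patch): zfcp_unit_lookup() above now converts
the 64-bit FCP LUN to the midlayer's integer LUN via scsilun_to_int() before
comparing. A hedged sketch of that comparison, wrapped in a hypothetical helper:

#include <scsi/scsi_device.h>           /* scsilun_to_int(), struct scsi_lun */

static int zfcp_unit_matches_lun(struct zfcp_unit *unit, unsigned int lun)
{
        int scsi_lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);

        return lun == scsi_lun;
}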
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2e85c6c49e7..ca9293ba176 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -26,9 +26,9 @@ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
atomic_read(&adapter->status));
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
- adapter->peer_wwnn);
+ (unsigned long long) adapter->peer_wwnn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
- adapter->peer_wwpn);
+ (unsigned long long) adapter->peer_wwpn);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
adapter->peer_d_id);
ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
@@ -135,8 +135,9 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
{
struct zfcp_adapter *adapter = dev_get_drvdata(dev);
struct zfcp_port *port;
- wwn_t wwpn;
+ u64 wwpn;
int retval = 0;
+ LIST_HEAD(port_remove_lh);
down(&zfcp_data.config_sema);
if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -144,7 +145,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
goto out;
}
- if (strict_strtoull(buf, 0, &wwpn)) {
+ if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
retval = -EINVAL;
goto out;
}
@@ -154,7 +155,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
if (port && (atomic_read(&port->refcount) == 0)) {
zfcp_port_get(port);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
- list_move(&port->list, &adapter->port_remove_lh);
+ list_move(&port->list, &port_remove_lh);
} else
port = NULL;
write_unlock_irq(&zfcp_data.config_lock);
@@ -200,7 +201,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
- fcp_lun_t fcp_lun;
+ u64 fcp_lun;
int retval = -EINVAL;
down(&zfcp_data.config_sema);
@@ -209,7 +210,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
goto out;
}
- if (strict_strtoull(buf, 0, &fcp_lun))
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
goto out;
unit = zfcp_unit_enqueue(port, fcp_lun);
@@ -233,8 +234,9 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
{
struct zfcp_port *port = dev_get_drvdata(dev);
struct zfcp_unit *unit;
- fcp_lun_t fcp_lun;
+ u64 fcp_lun;
int retval = 0;
+ LIST_HEAD(unit_remove_lh);
down(&zfcp_data.config_sema);
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -242,7 +244,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
goto out;
}
- if (strict_strtoull(buf, 0, &fcp_lun)) {
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
retval = -EINVAL;
goto out;
}
@@ -252,7 +254,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
if (unit && (atomic_read(&unit->refcount) == 0)) {
zfcp_unit_get(unit);
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
- list_move(&unit->list, &port->unit_remove_lh);
+ list_move(&unit->list, &unit_remove_lh);
} else
unit = NULL;
@@ -273,22 +275,7 @@ out:
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
-static struct attribute *zfcp_port_ns_attrs[] = {
- &dev_attr_port_failed.attr,
- &dev_attr_port_in_recovery.attr,
- &dev_attr_port_status.attr,
- &dev_attr_port_access_denied.attr,
- NULL
-};
-
-/**
- * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
- */
-struct attribute_group zfcp_sysfs_ns_port_attrs = {
- .attrs = zfcp_port_ns_attrs,
-};
-
-static struct attribute *zfcp_port_no_ns_attrs[] = {
+static struct attribute *zfcp_port_attrs[] = {
&dev_attr_unit_add.attr,
&dev_attr_unit_remove.attr,
&dev_attr_port_failed.attr,
@@ -302,7 +289,7 @@ static struct attribute *zfcp_port_no_ns_attrs[] = {
* zfcp_sysfs_port_attrs - sysfs attributes for all other ports
*/
struct attribute_group zfcp_sysfs_port_attrs = {
- .attrs = zfcp_port_no_ns_attrs,
+ .attrs = zfcp_port_attrs,
};
static struct attribute *zfcp_unit_attrs[] = {
@@ -394,9 +381,11 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \
static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
- unit->port->adapter->ccw_device->dev.bus_id);
-ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun);
+ dev_name(&unit->port->adapter->ccw_device->dev));
+ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
+ (unsigned long long) unit->port->wwpn);
+ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
+ (unsigned long long) unit->fcp_lun);
struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
&dev_attr_fcp_lun,
@@ -487,10 +476,23 @@ ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
(unsigned long long) stat_info.seconds_act);
+static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *scsi_host = class_to_shost(dev);
+ struct zfcp_adapter *adapter =
+ (struct zfcp_adapter *) scsi_host->hostdata[0];
+
+ return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
+}
+static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
+
struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_utilization,
&dev_attr_requests,
&dev_attr_megabytes,
&dev_attr_seconds_active,
+ &dev_attr_queue_full,
NULL
};
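
Illustrative note (not part of the patch): the port/unit remove stores above now
collect objects on an on-stack LIST_HEAD() instead of a list embedded in the
parent object, so teardown can run outside the config lock. A hedged sketch of
the pattern; the helper name and the zfcp_port_dequeue() teardown call are
assumptions:

static void zfcp_remove_one_port(struct zfcp_port *port)
{
        LIST_HEAD(port_remove_lh);      /* replaces adapter->port_remove_lh */

        write_lock_irq(&zfcp_data.config_lock);
        list_move(&port->list, &port_remove_lh);        /* unhook under the lock */
        write_unlock_irq(&zfcp_data.config_lock);

        /* heavy teardown runs outside the config lock */
        zfcp_port_dequeue(port);
}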
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e0322b1c1e..d3b211af4e1 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
To compile this driver as a module, choose M here: the
module will be called qlogicfas.
-config SCSI_QLOGIC_FC_FIRMWARE
- bool "Include loadable firmware in driver"
- depends on SCSI_QLOGIC_FC
- help
- Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
- expanded LUN addressing and FcTape (FCP-2) support, in the
- qlogicfc driver. This is required on some platforms.
-
config SCSI_QLOGIC_1280
tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c2527..8abfd06b5a7 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
- timeout = cmd->timeout_per_command/HZ;
+ timeout = cmd->request->timeout/HZ;
if (timeout == 0)
timeout = 1;
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
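
Illustrative note (not part of the patch): several hunks in this series replace
cmd->timeout_per_command with the block-layer timeout kept in
cmd->request->timeout. A minimal sketch of deriving a firmware timeout in whole
seconds, as the aacraid change above does; the helper name is hypothetical:

static u32 fw_timeout_secs(struct scsi_cmnd *cmd)
{
        u32 timeout = cmd->request->timeout / HZ;       /* jiffies -> seconds */

        return timeout ? timeout : 1;   /* never hand the firmware a zero timeout */
}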
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index ef693e8412e..8f45570a8a0 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
/*
* I/O buffer for both MODE_SELECT and INQUIRY commands.
*/
- char buffer[CLARIION_BUFFER_SIZE];
+ unsigned char buffer[CLARIION_BUFFER_SIZE];
/*
* SCSI sense buffer for commands -- assumes serial issuance
* and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
err = SCSI_DH_DEV_TEMP_BUSY;
goto out;
}
- if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
+ if (csdev->buffer[4] > 2) {
/* Invalid buffer format */
sdev_printk(KERN_NOTICE, sdev,
"%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
return NULL;
}
- memset(rq->cmd, 0, BLK_MAX_CDB);
rq->cmd_len = COMMAND_SIZE(cmd);
rq->cmd[0] = cmd;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51..5e93c88ad66 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
- memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->cmd_flags |= REQ_FAILFAST;
req->cmd_len = COMMAND_SIZE(START_STOP);
- memset(req->cmd, 0, MAX_COMMAND_SIZE);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6e2f130d56d..50bf95f3b5c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
return NULL;
}
- memset(rq->cmd, 0, BLK_MAX_CDB);
-
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
rq->retries = RDAC_RETRIES;
@@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"STK", "OPENstorage D280"},
{"SUN", "CSM200_R"},
{"SUN", "LCSM100_F"},
+ {"DELL", "MD3000"},
+ {"DELL", "MD3000i"},
{NULL, NULL},
};
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692..c387c15a212 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
- scp->timeout_per_command = timeout*HZ;
scp->cmd_len = 12;
scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
ulong flags;
- unchar b, t;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
- if (!cmndinfo->internal_command) {
+ if (!cmndinfo->internal_command)
cmndinfo->priority = priority;
- b = scp->device->channel;
- t = scp->device->id;
- if (priority >= DEFAULT_PRI) {
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
- (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
- TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
- cmndinfo->timeout = gdth_update_timeout(scp, 0);
- }
- }
- }
if (ha->req_first==NULL) {
ha->req_first = scp; /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
return ((const char *)ha->binfo.type_string);
}
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+ gdth_ha_str *ha = shost_priv(scp->device->host);
+ struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+ unchar b, t;
+ ulong flags;
+ enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+	TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
+ b = scp->device->channel;
+ t = scp->device->id;
+
+	/*
+	 * We do not honor the command timeout directly, but allow up to
+	 * six times the command timeout: reset the timer unless this is
+	 * already the sixth timeout for this command.
+	 */
+ if (++cmndinfo->timeout_count < 6)
+ retval = BLK_EH_RESET_TIMER;
+
+ /* Reset the timeout if it is locked IO */
+ spin_lock_irqsave(&ha->smp_lock, flags);
+ if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+ (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+ TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+ retval = BLK_EH_RESET_TIMER;
+ }
+ spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+ return retval;
+}
+
+
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
BUG_ON(!cmndinfo);
scp->scsi_done = done;
- gdth_update_timeout(scp, scp->timeout_per_command * 6);
+ cmndinfo->timeout_count = 0;
cmndinfo->priority = DEFAULT_PRI;
return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
ha->hdr[j].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_wait_completion(ha, ha->bus_cnt, j);
- gdth_stop_timeout(ha, ha->bus_cnt, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_start_timeout(ha, ha->bus_cnt, j);
gdth_next(ha);
}
}
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j) {
+ for (j = 0; j < ha->tid_cnt; ++j)
gdth_wait_completion(ha, i, j);
- gdth_stop_timeout(ha, i, j);
- }
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j) {
- gdth_start_timeout(ha, i, j);
+ for (j = 0; j < ha->tid_cnt; ++j)
gdth_next(ha);
- }
}
}
break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
.proc_info = gdth_proc_info,
+ .eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
.this_id = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727c..1646444e9bd 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
dma_addr_t sense_paddr; /* sense dma-addr */
unchar priority;
- int timeout;
+ int timeout_count; /* # of timeout calls */
volatile int wait_for_completion;
ushort status;
ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26ae..59349a316e1 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
- ulong flags;
- Scsi_Cmnd *scp;
- unchar b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- if (!cmndinfo->internal_command) {
- b = scp->device->channel;
- t = scp->device->id;
- if (t == (unchar)id && b == (unchar)busnum) {
- TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
- cmndinfo->timeout = gdth_update_timeout(scp, 0);
- }
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
- ulong flags;
- Scsi_Cmnd *scp;
- unchar b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- if (!cmndinfo->internal_command) {
- b = scp->device->channel;
- t = scp->device->id;
- if (t == (unchar)id && b == (unchar)busnum) {
- TRACE2(("gdth_start_timeout(): update_timeout()\n"));
- gdth_update_timeout(scp, cmndinfo->timeout);
- }
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
- int oldto;
-
- oldto = scp->timeout_per_command;
- scp->timeout_per_command = timeout;
-
- if (timeout == 0) {
- del_timer(&scp->eh_timeout);
- scp->eh_timeout.data = (unsigned long) NULL;
- scp->eh_timeout.expires = 0;
- } else {
- if (scp->eh_timeout.data != (unsigned long) NULL)
- del_timer(&scp->eh_timeout);
- scp->eh_timeout.data = (unsigned long) scp;
- scp->eh_timeout.expires = jiffies + timeout;
- add_timer(&scp->eh_timeout);
- }
-
- return oldto;
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36..9b900cc9ebe 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
#endif
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1..3fdbb13e80a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
{
struct device *cdev;
- struct Scsi_Host *shost = ERR_PTR(-ENXIO);
+ struct Scsi_Host *shost = NULL;
cdev = class_find_device(&shost_class, NULL, &hostnum,
__scsi_host_match);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6..87e09f35d3d 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
- cmnd->timeout_per_command/HZ);
+ cmnd->request->timeout/HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc4..81c16cba541 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
pc->scsi_cmd = cmd;
pc->done = done;
- pc->timeout = jiffies + cmd->timeout_per_command;
+ pc->timeout = jiffies + cmd->request->timeout;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a655442..d30eb7ba018 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
- sdev->timeout = IPR_VSET_RW_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue,
+ IPR_VSET_RW_TIMEOUT);
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41d..ef683f0d2b5 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
- TimeOut = scb->scsi_cmd->timeout_per_command;
+ TimeOut = scb->scsi_cmd->request->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b3..da7b67d30d9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing in progress sc %p itt 0x%x\n",
task->sc, task->itt);
- fail_command(conn, task, DID_BUS_BUSY << 16);
+ fail_command(conn, task, error << 16);
}
}
}
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
cls_session = starget_to_session(scsi_target(scmd->device));
session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
goto done;
}
conn = session->leadconn;
if (!conn) {
 /* In the middle of shutting down */
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
goto done;
}
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
jiffies))
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
- rc = EH_RESET_TIMER;
+ rc = BLK_EH_RESET_TIMER;
done:
spin_unlock(&session->lock);
- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+ debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "nh");
return rc;
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bd..e1550117069 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
u32 val)
{
- struct domain_device *dev = ap->private_data;
+ struct domain_device *dev = link->ap->private_data;
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
return 0;
}
-static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
u32 *val)
{
- struct domain_device *dev = ap->private_data;
+ struct domain_device *dev = link->ap->private_data;
SAS_DPRINTK("STUB %s\n", __func__);
switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- scsi_req_abort_cmd(qc->scsicmd);
+ blk_abort_request(qc->scsicmd->request);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116..0001374bd6b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef30907..744838780ad 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
return;
}
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
if (!task) {
- cmd->timeout_per_command /= 2;
+ cmd->request->timeout /= 2;
SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
- cmd, task, (cmd->timeout_per_command ?
- "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
- if (!cmd->timeout_per_command)
- return EH_NOT_HANDLED;
- return EH_RESET_TIMER;
+ cmd, task, (cmd->request->timeout ?
+ "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
+ if (!cmd->request->timeout)
+ return BLK_EH_NOT_HANDLED;
+ return BLK_EH_RESET_TIMER;
}
spin_lock_irqsave(&task->task_state_lock, flags);
BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
- cmd, task);
- return EH_HANDLED;
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
+ "BLK_EH_HANDLED\n", cmd, task);
+ return BLK_EH_HANDLED;
}
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
- "EH_RESET_TIMER\n",
+ "BLK_EH_RESET_TIMER\n",
cmd, task);
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
+ SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
cmd, task);
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
return;
}
- scsi_req_abort_cmd(sc);
+ blk_abort_request(sc->request);
scsi_schedule_eh(sc->device->host);
}
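
Illustrative note (not part of the patch): the libsas hunks above replace
scsi_req_abort_cmd() with blk_abort_request() on the command's request and then
schedule the SCSI error handler. A hedged sketch of the sequence; the helper
name is hypothetical:

static void sas_bounce_to_eh(struct scsi_cmnd *sc)
{
        blk_abort_request(sc->request);         /* was: scsi_req_abort_cmd(sc) */
        scsi_schedule_eh(sc->device->host);     /* let the EH thread recover */
}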
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7..afe1de99876 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
* cmd has not been completed within the timeout period.
*/
static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
if (time_after(jiffies, scmd->jiffies_at_alloc +
(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
}
/**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd..3b7240e4081 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
**
**----------------------------------------------------
*/
- if (np->settle_time && cmd->timeout_per_command >= HZ) {
- u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
+ if (np->settle_time && cmd->request->timeout >= HZ) {
+ u_long tlimit = jiffies + cmd->request->timeout - HZ;
if (time_after(np->settle_time, tlimit))
np->settle_time = tlimit;
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd79..b6cd12b2e99 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
/* Set ISP command timeout. */
- pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+ pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
/* Set device target ID and LUN */
pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34..0ddfe7106b3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
- else if (start == (FA_BOOT_CODE_ADDR*4) ||
- start == (FA_RISC_CODE_ADDR*4))
+ else if (start == (ha->flt_region_boot * 4) ||
+ start == (ha->flt_region_fw * 4))
valid = 1;
- else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4))
+ else if (IS_QLA25XX(ha) &&
+ start == (ha->flt_region_vpd_nvram * 4))
valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
@@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
+ pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd..83c81921677 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
struct qla_statistics {
uint32_t total_isp_aborts;
+ uint64_t input_bytes;
+ uint64_t output_bytes;
};
/*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
#define FCPORT_UPDATE_NEEDED 27
#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
#define UNLOADING 29
+#define NPIV_CONFIG_NEEDED 30
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
uint64_t fce_wr, fce_rd;
struct mutex fce_mutex;
- uint32_t hw_event_start;
uint32_t hw_event_ptr;
uint32_t hw_event_pause_errors;
@@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_fw;
+ uint32_t flt_region_vpd_nvram;
+ uint32_t flt_region_hw_event;
+ uint32_t flt_region_npiv_conf;
+
/* Needed for BEACON */
uint16_t beacon_blink_led;
uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400..d1d14202575 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
#define FA_RISC_CODE_ADDR 0x20000
#define FA_RISC_CODE_SEGMENTS 2
+#define FA_FLASH_DESCR_ADDR_24 0x11000
+#define FA_FLASH_LAYOUT_ADDR_24 0x11400
+#define FA_NPIV_CONF0_ADDR_24 0x16000
+#define FA_NPIV_CONF1_ADDR_24 0x17000
+
#define FA_FW_AREA_ADDR 0x40000
#define FA_VPD_NVRAM_ADDR 0x48000
#define FA_FEATURE_ADDR 0x4C000
#define FA_FLASH_DESCR_ADDR 0x50000
+#define FA_FLASH_LAYOUT_ADDR 0x50400
#define FA_HW_EVENT0_ADDR 0x54000
-#define FA_HW_EVENT1_ADDR 0x54200
+#define FA_HW_EVENT1_ADDR 0x54400
#define FA_HW_EVENT_SIZE 0x200
#define FA_HW_EVENT_ENTRY_SIZE 4
+#define FA_NPIV_CONF0_ADDR 0x5C000
+#define FA_NPIV_CONF1_ADDR 0x5D000
+
/*
* Flash Error Log Event Codes.
*/
@@ -806,10 +815,6 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
-#define FA_BOOT_LOG_ADDR 0x58000
-#define FA_FW_DUMP0_ADDR 0x60000
-#define FA_FW_DUMP1_ADDR 0x70000
-
uint32_t flash_data; /* Flash/NVRAM BIOS data. */
uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
uint8_t unused2[65];
};
+/* Flash Layout Table ********************************************************/
+
+struct qla_flt_location {
+ uint8_t sig[4];
+ uint32_t start_lo;
+ uint32_t start_hi;
+ uint16_t unused;
+ uint16_t checksum;
+};
+
+struct qla_flt_header {
+ uint16_t version;
+ uint16_t length;
+ uint16_t checksum;
+ uint16_t unused;
+};
+
+#define FLT_REG_FW 0x01
+#define FLT_REG_BOOT_CODE 0x07
+#define FLT_REG_VPD_0 0x14
+#define FLT_REG_NVRAM_0 0x15
+#define FLT_REG_VPD_1 0x16
+#define FLT_REG_NVRAM_1 0x17
+#define FLT_REG_FDT 0x1a
+#define FLT_REG_FLT 0x1c
+#define FLT_REG_HW_EVENT_0 0x1d
+#define FLT_REG_HW_EVENT_1 0x1f
+#define FLT_REG_NPIV_CONF_0 0x29
+#define FLT_REG_NPIV_CONF_1 0x2a
+
+struct qla_flt_region {
+ uint32_t code;
+ uint32_t size;
+ uint32_t start;
+ uint32_t end;
+};
+
+/* Flash NPIV Configuration Table ********************************************/
+
+struct qla_npiv_header {
+ uint8_t sig[2];
+ uint16_t version;
+ uint16_t entries;
+ uint16_t unused[4];
+ uint16_t checksum;
+};
+
+struct qla_npiv_entry {
+ uint16_t flags;
+ uint16_t vf_id;
+ uint16_t qos;
+ uint16_t unused1;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+};
+
/* 84XX Support **************************************************************/
#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
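
Illustrative note (not part of the patch): the flash layout structures added
above carry a 16-bit checksum chosen so that the little-endian words of the
structure sum to zero; the verification code later in this patch applies exactly
this check to struct qla_flt_location. A hedged sketch with a hypothetical
helper name:

static int qla_flt_location_ok(const struct qla_flt_location *fltl)
{
        const __le16 *wptr = (const __le16 *) fltl;
        uint16_t chksum = 0;
        unsigned int cnt;

        for (cnt = sizeof(*fltl) >> 1; cnt; cnt--)
                chksum += le16_to_cpu(*wptr++);

        return chksum == 0;     /* valid only when the words sum to zero */
}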
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a..753dbe6cce6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
uint16_t, uint16_t);
-extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
+extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
+extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
+
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aa..a470f2d3270 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->isp_ops->reset_chip(ha);
+ rval = qla2xxx_get_flash_info(ha);
+ if (rval) {
+ DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
+ ha->host_no));
+ return (rval);
+ }
+
ha->isp_ops->get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
rval = qla2x00_setup_chip(ha);
if (rval)
return (rval);
- qla2xxx_get_flash_info(ha);
}
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(ha);
@@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
DEBUG3(printk("%s: exiting normally\n", __func__));
}
- /* Restore state if a resync event occured during processing */
+ /* Restore state if a resync event occurred during processing */
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
rval = QLA_SUCCESS;
/* Try GID_PT to get device list, else GAN. */
- swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC);
+ swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
if (!swl) {
/*EMPTY*/
DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
- faddr = FA_RISC_CODE_ADDR;
+ faddr = ha->flt_region_fw;
dcode = (uint32_t *)ha->request_ring;
*srisc_addr = 0;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaa..e90afad120e 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
* @ha: HA context
* @ha_locked: is function called with the hardware lock
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
static inline int
qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa461..85bc0a48598 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
* Returns the proper CF_* direction based on CDB.
*/
static inline uint16_t
-qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
+qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
cflags = 0;
/* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ sp->fcport->ha->qla_stats.output_bytes +=
+ scsi_bufflen(sp->cmd);
+ } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
+ sp->fcport->ha->qla_stats.input_bytes +=
+ scsi_bufflen(sp->cmd);
+ }
return (cflags);
}
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
- cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
ha = sp->ha;
- cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
+ cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
*
* Can be called from both normal and interrupt context.
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
ha = sp->ha;
/* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ sp->fcport->ha->qla_stats.output_bytes +=
+ scsi_bufflen(sp->cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
+ sp->fcport->ha->qla_stats.input_bytes +=
+ scsi_bufflen(sp->cmd);
+ }
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* qla24xx_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
*
- * Returns non-zero if a failure occured, else zero.
+ * Returns non-zero if a failure occurred, else zero.
*/
int
qla24xx_start_scsi(srb_t *sp)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 45a3b93eed5..fc4bfa7f839 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
+ DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
mb[1]));
- qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
+ qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
ha->host_no, mb[1]));
qla_printk(KERN_INFO, ha,
- "LIP reset occured (%x).\n", mb[1]);
+ "LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
case MBA_PORT_UPDATE: /* Port database update */
/*
- * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
+ * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
* event etc. earlier indicating loop is down) then process
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
ha->host_no, mb[1], mb[2], mb[3]));
- rscn_entry = (mb[1] << 16) | mb[2];
+ rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
ha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
}
+	/* Ignore reserved bits from the RSCN payload. */
+ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
rscn_queue_index = ha->rscn_in_ptr + 1;
if (rscn_queue_index == MAX_RSCN_COUNT)
rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
resid = resid_len;
/* Use F/W calculated residual length. */
if (IS_FWI2_CAPABLE(ha)) {
- if (scsi_status & SS_RESIDUAL_UNDER &&
- resid != fw_resid_len) {
+ if (!(scsi_status & SS_RESIDUAL_UNDER)) {
+ lscsi_status = 0;
+ } else if (resid != fw_resid_len) {
scsi_status &= ~SS_RESIDUAL_UNDER;
lscsi_status = 0;
}
@@ -1834,7 +1837,6 @@ clear_risc_ints:
WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
}
spin_unlock_irq(&ha->hardware_lock);
- ha->isp_ops->enable_intrs(ha);
fail:
return ret;
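
Illustrative note (not part of the patch): in the RSCN handling above, mb[1] is
masked with 0xff for the host-PID comparison and with 0x3ff before the entry is
queued, dropping reserved bits. A worked example with hypothetical mailbox
values:

        uint16_t mb1 = 0x8102, mb2 = 0x1234;                    /* hypothetical */
        uint32_t cmp_entry   = ((mb1 & 0xff)  << 16) | mb2;     /* 0x00021234 */
        uint32_t queue_entry = ((mb1 & 0x3ff) << 16) | mb2;     /* 0x01021234 */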
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 813bc7784c0..36bc6851e23 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occured. Scheduling ISP "
+ "Mailbox command timeout occurred. Scheduling ISP "
"abort.\n");
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
DEBUG2_3_11(printk("%s(%ld): timeout calling "
"abort_isp\n", __func__, ha->host_no));
qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occured. Issuing ISP "
+ "Mailbox command timeout occurred. Issuing ISP "
"abort.\n");
set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
char *pmap;
dma_addr_t pmap_dma;
- pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma);
+ pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
__func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 26afe44265c..3433441b956 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
set_bit(RSCN_UPDATE, &ha->dpc_flags);
+ set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
}
static int
@@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_25XX;
ha->isp_ops = &qla25xx_isp_ops;
- ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
- FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
}
host->can_queue = ha->request_q_length + 128;
@@ -1740,6 +1739,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto probe_failed;
+ ha->isp_ops->enable_intrs(ha);
+
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(ha);
@@ -2431,6 +2432,12 @@ qla2x00_do_dpc(void *data)
ha->host_no));
}
+ if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
+ atomic_read(&ha->loop_state) == LOOP_READY) {
+ clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
+ qla2xxx_flash_npiv_conf(ha);
+ }
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca7447493..90a13211717 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
}
-void
-qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+static int
+qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
+{
+ const char *loc, *locations[] = { "DEF", "PCI" };
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *buf, *bcode, last_image;
+ uint16_t cnt, chksum, *wptr;
+ struct qla_flt_location *fltl;
+
+ /*
+ * FLT-location structure resides after the last PCI region.
+ */
+
+ /* Begin with sane defaults. */
+ loc = locations[0];
+ *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
+ FA_FLASH_LAYOUT_ADDR;
+
+ /* Begin with first PCI expansion ROM header. */
+ buf = (uint8_t *)ha->request_ring;
+ dcode = (uint32_t *)ha->request_ring;
+ pcihdr = 0;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
+ goto end;
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+ qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+ bcode = buf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R')
+ goto end;
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Now verify FLT-location structure. */
+ fltl = (struct qla_flt_location *)ha->request_ring;
+ qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
+ sizeof(struct qla_flt_location) >> 2);
+ if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
+ fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
+ goto end;
+
+ wptr = (uint16_t *)ha->request_ring;
+ cnt = sizeof(struct qla_flt_location) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ qla_printk(KERN_ERR, ha,
+ "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
+ qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Good data. Use specified location. */
+ loc = locations[1];
+ *start = le16_to_cpu(fltl->start_hi) << 16 |
+ le16_to_cpu(fltl->start_lo);
+end:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+ return QLA_SUCCESS;
+}
+
+static void
+qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
+{
+ const char *loc, *locations[] = { "DEF", "FLT" };
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ uint32_t start;
+ struct qla_flt_header *flt;
+ struct qla_flt_region *region;
+
+ ha->flt_region_flt = flt_addr;
+ wptr = (uint16_t *)ha->request_ring;
+ flt = (struct qla_flt_header *)ha->request_ring;
+ region = (struct qla_flt_region *)&flt[1];
+ ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+ flt_addr << 2, OPTROM_BURST_SIZE);
+ if (*wptr == __constant_cpu_to_le16(0xffff))
+ goto no_flash_data;
+ if (flt->version != __constant_cpu_to_le16(1)) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ le16_to_cpu(flt->checksum)));
+ goto no_flash_data;
+ }
+
+ cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
+ for (chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
+ "version=0x%x length=0x%x checksum=0x%x.\n",
+ le16_to_cpu(flt->version), le16_to_cpu(flt->length),
+ chksum));
+ goto no_flash_data;
+ }
+
+ loc = locations[1];
+ cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
+ for ( ; cnt; cnt--, region++) {
+ /* Store addresses as DWORD offsets. */
+ start = le32_to_cpu(region->start) >> 2;
+
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
+ le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+
+ switch (le32_to_cpu(region->code)) {
+ case FLT_REG_FW:
+ ha->flt_region_fw = start;
+ break;
+ case FLT_REG_BOOT_CODE:
+ ha->flt_region_boot = start;
+ break;
+ case FLT_REG_VPD_0:
+ ha->flt_region_vpd_nvram = start;
+ break;
+ case FLT_REG_FDT:
+ ha->flt_region_fdt = start;
+ break;
+ case FLT_REG_HW_EVENT_0:
+ if (!PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_hw_event = start;
+ break;
+ case FLT_REG_HW_EVENT_1:
+ if (PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_hw_event = start;
+ break;
+ case FLT_REG_NPIV_CONF_0:
+ if (!PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_npiv_conf = start;
+ break;
+ case FLT_REG_NPIV_CONF_1:
+ if (PCI_FUNC(ha->pdev->devfn))
+ ha->flt_region_npiv_conf = start;
+ break;
+ }
+ }
+ goto done;
+
+no_flash_data:
+ /* Use hardcoded defaults. */
+ loc = locations[0];
+ ha->flt_region_fw = FA_RISC_CODE_ADDR;
+ ha->flt_region_boot = FA_BOOT_CODE_ADDR;
+ ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
+ ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
+ FA_FLASH_DESCR_ADDR;
+ ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
+ FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
+ ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
+ (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
+ (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
+done:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
+ "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
+ ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
+ ha->flt_region_npiv_conf));
+}
+
+static void
+qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
{
#define FLASH_BLK_SIZE_32K 0x8000
#define FLASH_BLK_SIZE_64K 0x10000
+ const char *loc, *locations[] = { "MID", "FDT" };
uint16_t cnt, chksum;
uint16_t *wptr;
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
-
- if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
- return;
+ uint16_t mid, fid;
wptr = (uint16_t *)ha->request_ring;
fdt = (struct qla_fdt_layout *)ha->request_ring;
ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
- FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE);
+ ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
goto no_flash_data;
}
- ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f;
+ loc = locations[1];
+ mid = le16_to_cpu(fdt->man_id);
+ fid = le16_to_cpu(fdt->id);
+ ha->fdt_odd_index = mid == 0x1f;
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
flash_conf_to_access_addr(0x0336);
}
-
- DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
- le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
- ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
- ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
- return;
-
+ goto done;
no_flash_data:
+ loc = locations[0];
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
+ mid = man_id;
+ fid = flash_id;
ha->fdt_wrt_disable = 0x9c;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
switch (man_id) {
@@ -625,14 +799,117 @@ no_flash_data:
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
break;
}
-
- DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id,
+done:
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
ha->fdt_block_size));
}
+int
+qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+{
+ int ret;
+ uint32_t flt_addr;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return QLA_SUCCESS;
+
+ ret = qla2xxx_find_flt_start(ha, &flt_addr);
+ if (ret != QLA_SUCCESS)
+ return ret;
+
+ qla2xxx_get_flt_info(ha, flt_addr);
+ qla2xxx_get_fdt_info(ha);
+
+ return QLA_SUCCESS;
+}
+
+void
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
+{
+#define NPIV_CONFIG_SIZE (16*1024)
+ void *data;
+ uint16_t *wptr;
+ uint16_t cnt, chksum;
+ struct qla_npiv_header hdr;
+ struct qla_npiv_entry *entry;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
+ return;
+
+ ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
+ ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
+ if (hdr.version == __constant_cpu_to_le16(0xffff))
+ return;
+ if (hdr.version != __constant_cpu_to_le16(1)) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ le16_to_cpu(hdr.checksum)));
+ return;
+ }
+
+ data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
+ if (!data) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
+ "allocate memory.\n"));
+ return;
+ }
+
+ ha->isp_ops->read_optrom(ha, (uint8_t *)data,
+ ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
+
+ cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
+ sizeof(struct qla_npiv_entry)) >> 1;
+ for (wptr = data, chksum = 0; cnt; cnt--)
+ chksum += le16_to_cpu(*wptr++);
+ if (chksum) {
+ DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
+ "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
+ le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
+ chksum));
+ goto done;
+ }
+
+ entry = data + sizeof(struct qla_npiv_header);
+ cnt = le16_to_cpu(hdr.entries);
+ for ( ; cnt; cnt--, entry++) {
+ uint16_t flags;
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+
+ flags = le16_to_cpu(entry->flags);
+ if (flags == 0xffff)
+ continue;
+ if ((flags & BIT_0) == 0)
+ continue;
+
+ memset(&vid, 0, sizeof(vid));
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+ vid.disable = false;
+ vid.port_name = wwn_to_u64(entry->port_name);
+ vid.node_name = wwn_to_u64(entry->node_name);
+
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
+ "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
+
+ vport = fc_vport_create(ha->host, 0, &vid);
+ if (!vport)
+ qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
+ "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
+ }
+done:
+ kfree(data);
+}
+
static void
qla24xx_unprotect_flash(scsi_qla_host_t *ha)
{
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
- flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
+ flash_data_to_access_addr(ha->flt_region_vpd_nvram |
+ naddr)));
return buf;
}
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dbuf = vmalloc(RMW_BUFFER_SIZE);
if (!dbuf)
return QLA_MEMORY_ALLOC_FAILED;
- ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
+ ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
memcpy(dbuf + (naddr << 2), buf, bytes);
- ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2,
+ ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
vfree(dbuf);
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(dbyte, 0, 8);
dcode = (uint16_t *)dbyte;
- qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10,
+ qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
__func__, ha->host_no));
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw revision at "
- "%x.\n", __func__, FA_RISC_CODE_ADDR * 4));
+ "%x.\n", __func__, ha->flt_region_fw * 4));
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
dcode = mbuf;
/* Begin with first PCI expansion ROM header. */
- pcihdr = 0;
+ pcihdr = ha->flt_region_boot;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
- qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4);
+ qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
- __func__, FA_RISC_CODE_ADDR));
+ __func__, ha->flt_region_fw));
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
/* Locate first empty entry. */
for (;;) {
if (ha->hw_event_ptr >=
- ha->hw_event_start + FA_HW_EVENT_SIZE) {
+ ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
int rval;
uint32_t marker[2], fdata[4];
- if (ha->hw_event_start == 0)
+ if (ha->flt_region_hw_event == 0)
return QLA_FUNCTION_FAILED;
DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
/* Locate marker. */
- ha->hw_event_ptr = ha->hw_event_start;
+ ha->hw_event_ptr = ha->flt_region_hw_event;
for (;;) {
qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
4);
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
break;
ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
if (ha->hw_event_ptr >=
- ha->hw_event_start + FA_HW_EVENT_SIZE) {
+ ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Log Full!\n"));
return QLA_MEMORY_ALLOC_FAILED;
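
The new FLT-location, FLT-header and NPIV-config parsers added above all rely on the same validation rule: summing the structure as little-endian 16-bit words must yield zero, otherwise the data is rejected and hardcoded defaults are used. A small user-space sketch of that rule (the sample image and its layout are made up):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum a buffer as little-endian 16-bit words; a consistent image sums to 0. */
static uint16_t le16_sum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint16_t)(buf[i] | (buf[i + 1] << 8));
	return sum;
}

int main(void)
{
	/* Invented 8-byte structure whose last word is the checksum. */
	uint8_t img[8] = { 0x51, 0x46, 0x4c, 0x54, 0x34, 0x12, 0, 0 };
	uint16_t fixup = (uint16_t)(0x10000 - le16_sum(img, 6));

	img[6] = fixup & 0xff;
	img[7] = fixup >> 8;

	printf("checksum over image = 0x%04x (0 means consistent)\n",
	       le16_sum(img, sizeof(img)));
	return 0;
}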
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4160e4caa7b..be5e299df52 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k7"
+#define QLA2XXX_VERSION "8.02.01-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc5..de8279ad7d8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
"dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
- cmd, jiffies, cmd->timeout_per_command / HZ,
+ cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
/* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
DEBUG2(printk(KERN_INFO
"scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
"to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
- ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
+ ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 4a1cf6377f6..90535089672 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -914,6 +914,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
ds[i].d_count = sg_dma_len(s);
}
sg_count -= n;
+ sg = s;
}
} else {
cmd->dataseg[0].d_base = 0;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503..2ac3cb2b908 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
unsigned long flags;
cmd->device = dev;
- init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
spin_lock_irqsave(&dev->list_lock, flags);
list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
unsigned long timeout;
int rtn = 0;
+ /*
+ * We will use a queued command if possible, otherwise we will
+ * emulate the queuing and calling of completion function ourselves.
+ */
+ atomic_inc(&cmd->device->iorequest_cnt);
+
/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
/* in SDEV_DEL we error all commands. DID_NO_CONNECT
* returns an immediate error upwards, and signals
* that the device is no longer present */
cmd->result = DID_NO_CONNECT << 16;
- atomic_inc(&cmd->device->iorequest_cnt);
- __scsi_done(cmd);
+ scsi_done(cmd);
/* return 0 (because the command has been processed) */
goto out;
}
- /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
- if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
+ /* Check to see if the scsi lld made this device blocked. */
+ if (unlikely(scsi_device_blocked(cmd->device))) {
/*
- * in SDEV_BLOCK, the command is just put back on the device
- * queue. The suspend state has already blocked the queue so
- * future requests should not occur until the device
- * transitions out of the suspend state.
+ * in blocked state, the command is just put back on
+ * the device queue. The suspend state has already
+ * blocked the queue so future requests should not
+ * occur until the device transitions out of the
+ * suspend state.
*/
+
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
host->resetting = 0;
}
- /*
- * AK: unlikely race here: for some reason the timer could
- * expire before the serial number is set up below.
- */
- scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
-
scsi_log_send(cmd);
/*
- * We will use a queued command if possible, otherwise we will
- * emulate the queuing and calling of completion function ourselves.
- */
- atomic_inc(&cmd->device->iorequest_cnt);
-
- /*
* Before we queue this command, check if the command
* length exceeds what the host adapter can handle.
*/
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(host->host_lock, flags);
+ /*
+ * AK: unlikely race here: for some reason the timer could
+ * expire before the serial number is set up below.
+ *
+ * TODO: kill serial or move to blk layer
+ */
scsi_cmd_get_serial(host, cmd);
if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
- if (scsi_delete_timer(cmd)) {
- atomic_inc(&cmd->device->iodone_cnt);
- scsi_queue_insert(cmd,
- (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
- rtn : SCSI_MLQUEUE_HOST_BUSY);
- }
+ scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+ rtn : SCSI_MLQUEUE_HOST_BUSY);
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
/**
- * scsi_req_abort_cmd -- Request command recovery for the specified command
- * @cmd: pointer to the SCSI command of interest
- *
- * This function requests that SCSI Core start recovery for the
- * command by deleting the timer and adding the command to the eh
- * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
- * implement their own error recovery MAY ignore the timeout event if
- * they generated scsi_req_abort_cmd.
- */
-void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
-{
- if (!scsi_delete_timer(cmd))
- return;
- scsi_times_out(cmd);
-}
-EXPORT_SYMBOL(scsi_req_abort_cmd);
-
-/**
* scsi_done - Enqueue the finished SCSI command into the done queue.
* @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
* ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
- /*
- * We don't have to worry about this one timing out anymore.
- * If we are unable to remove the timer, then the command
- * has already timed out. In which case, we have no choice but to
- * let the timeout function run, as we have no idea where in fact
- * that function could really be. It might be on another processor,
- * etc, etc.
- */
- if (!scsi_delete_timer(cmd))
- return;
- __scsi_done(cmd);
-}
-
-/* Private entry to scsi_done() to complete a command when the timer
- * isn't running --- used by scsi_times_out */
-void __scsi_done(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
-
- /*
- * Set the serial numbers back to zero
- */
- cmd->serial_number = 0;
-
- atomic_inc(&cmd->device->iodone_cnt);
- if (cmd->result)
- atomic_inc(&cmd->device->ioerr_cnt);
-
- BUG_ON(!rq);
-
- /*
- * The uptodate/nbytes values don't matter, as we allow partial
- * completes and thus will check this in the softirq callback
- */
- rq->completion_data = cmd;
- blk_complete_request(rq);
+ blk_complete_request(cmd->request);
}
/* Move this to a header if it becomes more generally useful */
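
With scsi_add_timer()/scsi_delete_timer() gone, command timeouts are owned by the block layer: the scsi_lib.c hunk later in this patch registers scsi_times_out() via blk_queue_rq_timed_out(), and the per-device timeout becomes the queue's rq_timeout. A hedged sketch of how a queue is wired up under this model, using only the two blk_queue_* calls that appear in this patch (the wrapper function itself is illustrative):

#include <linux/blkdev.h>

/* Illustrative only: mirrors what scsi_alloc_queue() and sysfs now do. */
static void example_setup_queue_timeouts(struct request_queue *q,
					 enum blk_eh_timer_return
					 (*timed_out)(struct request *))
{
	/* Block layer invokes this handler when a request's timer fires. */
	blk_queue_rq_timed_out(q, timed_out);

	/* Default per-request timeout, formerly sdev->timeout. */
	blk_queue_rq_timeout(q, 30 * HZ);
}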
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1da..fecefa05cb6 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
}
/**
- * scsi_add_timer - Start timeout timer for a single scsi command.
- * @scmd: scsi command that is about to start running.
- * @timeout: amount of time to allow this command to run.
- * @complete: timeout function to call if timer isn't canceled.
- *
- * Notes:
- * This should be turned into an inline function. Each scsi command
- * has its own timer, and as it is added to the queue, we set up the
- * timer. When the command completes, we cancel the timer.
- */
-void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
- void (*complete)(struct scsi_cmnd *))
-{
-
- /*
- * If the clock was already running for this command, then
- * first delete the timer. The timer handling code gets rather
- * confused if we don't do this.
- */
- if (scmd->eh_timeout.function)
- del_timer(&scmd->eh_timeout);
-
- scmd->eh_timeout.data = (unsigned long)scmd;
- scmd->eh_timeout.expires = jiffies + timeout;
- scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
-
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
- " %d, (%p)\n", __func__,
- scmd, timeout, complete));
-
- add_timer(&scmd->eh_timeout);
-}
-
-/**
- * scsi_delete_timer - Delete/cancel timer for a given function.
- * @scmd: Cmd that we are canceling timer for
- *
- * Notes:
- * This should be turned into an inline function.
- *
- * Return value:
- * 1 if we were able to detach the timer. 0 if we blew it, and the
- * timer function has already started to run.
- */
-int scsi_delete_timer(struct scsi_cmnd *scmd)
-{
- int rtn;
-
- rtn = del_timer(&scmd->eh_timeout);
-
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
- " rtn: %d\n", __func__,
- scmd, rtn));
-
- scmd->eh_timeout.data = (unsigned long)NULL;
- scmd->eh_timeout.function = NULL;
-
- return rtn;
-}
-
-/**
* scsi_times_out - Timeout function for normal scsi commands.
- * @scmd: Cmd that is timing out.
+ * @req: request that is timing out.
*
* Notes:
* We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-void scsi_times_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return scsi_times_out(struct request *req)
{
- enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+ struct scsi_cmnd *scmd = req->special;
+ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
scsi_log_completion(scmd, TIMEOUT_ERROR);
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
eh_timed_out = NULL;
if (eh_timed_out)
- switch (eh_timed_out(scmd)) {
- case EH_HANDLED:
- __scsi_done(scmd);
- return;
- case EH_RESET_TIMER:
- scsi_add_timer(scmd, scmd->timeout_per_command,
- scsi_times_out);
- return;
- case EH_NOT_HANDLED:
+ rtn = eh_timed_out(scmd);
+ switch (rtn) {
+ case BLK_EH_NOT_HANDLED:
break;
+ default:
+ return rtn;
}
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
scmd->result |= DID_TIME_OUT << 16;
- __scsi_done(scmd);
+ return BLK_EH_HANDLED;
}
+
+ return BLK_EH_NOT_HANDLED;
}
/**
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
blk_rq_init(NULL, &req);
scmd->request = &req;
- memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
scmd->cmnd = req.cmd;
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scmd->sc_data_direction = DMA_BIDIRECTIONAL;
- init_timer(&scmd->eh_timeout);
-
spin_lock_irqsave(shost->host_lock, flags);
shost->tmf_in_progress = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
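
scsi_times_out() now reports back to the block layer with an enum blk_eh_timer_return instead of completing the command or re-arming a private timer itself, and LLDD eh_timed_out handlers switch from the old EH_* values to BLK_EH_* (the fc_timed_out() hunk later in this patch is the in-tree example). A minimal sketch of such a handler; the port-state predicate is a stand-in for whatever condition a real driver would check:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Stand-in predicate, not a real API; a real LLDD inspects its own state. */
static bool example_port_is_blocked(struct scsi_device *sdev)
{
	return false;
}

static enum blk_eh_timer_return example_eh_timed_out(struct scsi_cmnd *scmd)
{
	/* Transport temporarily down: ask the block layer to re-arm. */
	if (example_port_is_blocked(scmd->device))
		return BLK_EH_RESET_TIMER;

	/* Otherwise let normal SCSI error handling take over. */
	return BLK_EH_NOT_HANDLED;
}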
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ff5d56b3ee4..98ee55ced59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count = scsi_bufflen(cmd);
+ int this_count;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
+ this_count = blk_rq_bytes(req);
/* good_bytes = 0, or (inclusive) there were leftovers and
* result = 0, so scsi_end_request couldn't retry.
@@ -1180,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
cmd->transfersize = req->data_len;
cmd->allowed = req->retries;
- cmd->timeout_per_command = req->timeout;
return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1250,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
break;
case SDEV_QUIESCE:
case SDEV_BLOCK:
+ case SDEV_CREATED_BLOCK:
/*
* If the devices is blocked we defer normal commands.
*/
@@ -1415,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
- __scsi_done(cmd);
+ blk_complete_request(req);
}
static void scsi_softirq_done(struct request *rq)
{
- struct scsi_cmnd *cmd = rq->completion_data;
- unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+ struct scsi_cmnd *cmd = rq->special;
+ unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
+ /*
+ * Set the serial numbers back to zero
+ */
+ cmd->serial_number = 0;
+
+ atomic_inc(&cmd->device->iodone_cnt);
+ if (cmd->result)
+ atomic_inc(&cmd->device->ioerr_cnt);
+
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1674,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
+ blk_queue_rq_timed_out(q, scsi_times_out);
return q;
}
@@ -2063,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (state) {
case SDEV_CREATED:
- /* There are no legal states that come back to
- * created. This is the manually initialised start
- * state */
- goto illegal;
+ switch (oldstate) {
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
case SDEV_RUNNING:
switch (oldstate) {
@@ -2104,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_BLOCK:
switch (oldstate) {
- case SDEV_CREATED:
case SDEV_RUNNING:
+ case SDEV_CREATED_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_CREATED_BLOCK:
+ switch (oldstate) {
+ case SDEV_CREATED:
break;
default:
goto illegal;
@@ -2393,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
int err = 0;
err = scsi_device_set_state(sdev, SDEV_BLOCK);
- if (err)
- return err;
+ if (err) {
+ err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+ if (err)
+ return err;
+ }
/*
* The device has transitioned to SDEV_BLOCK. Stop the
@@ -2437,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
* and goose the device queue if successful.
*/
err = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (err)
- return err;
+ if (err) {
+ err = scsi_device_set_state(sdev, SDEV_CREATED);
+
+ if (err)
+ return err;
+ }
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
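
The scsi_lib.c and scsi_scan.c changes lean on two small predicates that are not shown in this patch, scsi_device_blocked() and scsi_device_created(); presumably they are inline helpers over sdev_state that now cover both the plain and the created-blocked variants. A hedged sketch of what such helpers would look like (names prefixed example_ to make clear these are not the in-tree definitions):

#include <scsi/scsi_device.h>

/* Sketch only: the blocked states the dispatcher must defer on. */
static inline int example_device_blocked(struct scsi_device *sdev)
{
	return sdev->sdev_state == SDEV_BLOCK ||
	       sdev->sdev_state == SDEV_CREATED_BLOCK;
}

/* Sketch only: "still being created", whether blocked or not. */
static inline int example_device_created(struct scsi_device *sdev)
{
	return sdev->sdev_state == SDEV_CREATED ||
	       sdev->sdev_state == SDEV_CREATED_BLOCK;
}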
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a2266..b37e133de80 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/security.h>
+#include <linux/delay.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -30,6 +31,39 @@
struct sock *scsi_nl_sock = NULL;
EXPORT_SYMBOL_GPL(scsi_nl_sock);
+static DEFINE_SPINLOCK(scsi_nl_lock);
+static struct list_head scsi_nl_drivers;
+
+static u32 scsi_nl_state;
+#define STATE_EHANDLER_BSY 0x00000001
+
+struct scsi_nl_transport {
+ int (*msg_handler)(struct sk_buff *);
+ void (*event_handler)(struct notifier_block *, unsigned long, void *);
+ unsigned int refcnt;
+ int flags;
+};
+
+/* flags values (bit flags) */
+#define HANDLER_DELETING 0x1
+
+static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
+ { {NULL, }, };
+
+
+struct scsi_nl_drvr {
+ struct list_head next;
+ int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
+ u32 len, u32 pid);
+ void (*devt_handler)(struct notifier_block *nb,
+ unsigned long event, void *notify_ptr);
+ struct scsi_host_template *hostt;
+ u64 vendor_id;
+ unsigned int refcnt;
+ int flags;
+};
+
+
/**
* scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
struct scsi_nl_hdr *hdr;
- uint32_t rlen;
- int err;
+ unsigned long flags;
+ u32 rlen;
+ int err, tport;
while (skb->len >= NLMSG_SPACE(0)) {
err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
err = -EBADMSG;
- return;
+ goto next_msg;
}
hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
printk(KERN_WARNING "%s: discarding partial message\n",
__func__);
- return;
+ goto next_msg;
}
/*
- * We currently don't support anyone sending us a message
+ * Deliver message to the appropriate transport
*/
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+
+ tport = hdr->transport;
+ if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
+ !(transports[tport].flags & HANDLER_DELETING) &&
+ (transports[tport].msg_handler)) {
+ transports[tport].refcnt++;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = transports[tport].msg_handler(skb);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ transports[tport].refcnt--;
+ } else
+ err = -ENOENT;
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct netlink_notify *n = ptr;
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+ int tport;
if (n->protocol != NETLINK_SCSITRANSPORT)
return NOTIFY_DONE;
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ scsi_nl_state |= STATE_EHANDLER_BSY;
+
/*
- * Currently, we are not tracking PID's, etc. There is nothing
- * to handle.
+ * Pass event on to any transports that may be listening
*/
+ for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
+ if (!(transports[tport].flags & HANDLER_DELETING) &&
+ (transports[tport].event_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ transports[tport].event_handler(this, event, ptr);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ }
+
+ /*
+ * Pass event on to any drivers that may be listening
+ */
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (!(driver->flags & HANDLER_DELETING) &&
+ (driver->devt_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ driver->devt_handler(this, event, ptr);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ }
+
+ scsi_nl_state &= ~STATE_EHANDLER_BSY;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
return NOTIFY_DONE;
}
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
/**
- * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface
+ * GENERIC SCSI transport receive and event handlers
+ **/
+
+/**
+ * scsi_generic_msg_handler - receive message handler for GENERIC transport
+ * messages
+ *
+ * @skb: socket receive buffer
+ *
+ **/
+static int
+scsi_generic_msg_handler(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
+ struct scsi_nl_drvr *driver;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+ int err = 0, match, pid;
+
+ pid = NETLINK_CREDS(skb)->pid;
+
+ switch (snlh->msgtype) {
+ case SCSI_NL_SHOST_VENDOR:
+ {
+ struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
+
+ /* Locate the driver that corresponds to the message */
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ match = 0;
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (driver->vendor_id == msg->vendor_id) {
+ match = 1;
+ break;
+ }
+ }
+
+ if ((!match) || (!driver->dmsg_handler)) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = -ESRCH;
+ goto rcv_exit;
+ }
+
+ if (driver->flags & HANDLER_DELETING) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ err = -ESHUTDOWN;
+ goto rcv_exit;
+ }
+
+ driver->refcnt++;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+
+ /* if successful, scsi_host_lookup takes a shost reference */
+ shost = scsi_host_lookup(msg->host_no);
+ if (!shost) {
+ err = -ENODEV;
+ goto driver_exit;
+ }
+
+ /* is this host owned by the vendor ? */
+ if (shost->hostt != driver->hostt) {
+ err = -EINVAL;
+ goto vendormsg_put;
+ }
+
+ /* pass message on to the driver */
+ err = driver->dmsg_handler(shost, (void *)&msg[1],
+ msg->vmsg_datalen, pid);
+
+vendormsg_put:
+ /* release reference by scsi_host_lookup */
+ scsi_host_put(shost);
+
+driver_exit:
+ /* release our own reference on the registration object */
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ driver->refcnt--;
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ break;
+ }
+
+ default:
+ err = -EBADR;
+ break;
+ }
+
+rcv_exit:
+ if (err)
+ printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
+ __func__, snlh->msgtype, err);
+ return err;
+}
+
+
+/**
+ * scsi_nl_add_transport -
+ * Registers message and event handlers for a transport. Enables
+ * receipt of netlink messages and events to a transport.
+ *
+ * @tport: transport registering handlers
+ * @msg_handler: receive message handler callback
+ * @event_handler: receive event handler callback
+ **/
+int
+scsi_nl_add_transport(u8 tport,
+ int (*msg_handler)(struct sk_buff *),
+ void (*event_handler)(struct notifier_block *, unsigned long, void *))
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (tport >= SCSI_NL_MAX_TRANSPORTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ if (transports[tport].msg_handler || transports[tport].event_handler) {
+ err = -EALREADY;
+ goto register_out;
+ }
+
+ transports[tport].msg_handler = msg_handler;
+ transports[tport].event_handler = event_handler;
+ transports[tport].flags = 0;
+ transports[tport].refcnt = 0;
+
+register_out:
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
+
+
+/**
+ * scsi_nl_remove_transport -
+ * Disable transport reception of messages and events
+ *
+ * @tport: transport deregistering handlers
+ *
+ **/
+void
+scsi_nl_remove_transport(u8 tport)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ if (tport < SCSI_NL_MAX_TRANSPORTS) {
+ transports[tport].flags |= HANDLER_DELETING;
+
+ while (transports[tport].refcnt != 0) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ schedule_timeout_uninterruptible(HZ/4);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ transports[tport].msg_handler = NULL;
+ transports[tport].event_handler = NULL;
+ transports[tport].flags = 0;
+ }
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
+
+
+/**
+ * scsi_nl_add_driver -
+ * A driver is registering its interfaces for SCSI netlink messages
+ *
+ * @vendor_id: A unique identification value for the driver.
+ * @hostt: address of the driver's host template. Used
+ * to verify an shost is bound to the driver
+ * @nlmsg_handler: receive message handler callback
+ * @nlevt_handler: receive event handler callback
+ *
+ * Returns:
+ * 0 on Success
+ * error result otherwise
+ **/
+int
+scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
+ int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
+ u32 len, u32 pid),
+ void (*nlevt_handler)(struct notifier_block *nb,
+ unsigned long event, void *notify_ptr))
+{
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (unlikely(!driver)) {
+ printk(KERN_ERR "%s: allocation failure\n", __func__);
+ return -ENOMEM;
+ }
+
+ driver->dmsg_handler = nlmsg_handler;
+ driver->devt_handler = nlevt_handler;
+ driver->hostt = hostt;
+ driver->vendor_id = vendor_id;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ list_add_tail(&driver->next, &scsi_nl_drivers);
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
+
+
+/**
+ * scsi_nl_remove_driver -
+ * A driver is unregistering its interfaces for SCSI netlink messages
+ *
+ * @vendor_id: The unique identification value for the driver.
+ **/
+void
+scsi_nl_remove_driver(u64 vendor_id)
+{
+ struct scsi_nl_drvr *driver;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ if (scsi_nl_state & STATE_EHANDLER_BSY) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+
+ list_for_each_entry(driver, &scsi_nl_drivers, next) {
+ if (driver->vendor_id == vendor_id) {
+ driver->flags |= HANDLER_DELETING;
+ while (driver->refcnt != 0) {
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ schedule_timeout_uninterruptible(HZ/4);
+ spin_lock_irqsave(&scsi_nl_lock, flags);
+ }
+ list_del(&driver->next);
+ kfree(driver);
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&scsi_nl_lock, flags);
+
+ printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
+ __func__, (unsigned long long)vendor_id);
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
+
+
+/**
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
+ * the SCSI transport netlink interface
*
**/
void
@@ -136,6 +488,8 @@ scsi_netlink_init(void)
{
int error;
+ INIT_LIST_HEAD(&scsi_nl_drivers);
+
error = netlink_register_notifier(&scsi_netlink_notifier);
if (error) {
printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +504,15 @@ scsi_netlink_init(void)
printk(KERN_ERR "%s: register of recieve handler failed\n",
__func__);
netlink_unregister_notifier(&scsi_netlink_notifier);
+ return;
}
+ /* Register the entry points for the generic SCSI transport */
+ error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
+ scsi_generic_msg_handler, NULL);
+ if (error)
+ printk(KERN_ERR "%s: register of GENERIC transport handler"
+ " failed - %d\n", __func__, error);
return;
}
@@ -163,6 +524,8 @@ scsi_netlink_init(void)
void
scsi_netlink_exit(void)
{
+ scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
+
if (scsi_nl_sock) {
netlink_kernel_release(scsi_nl_sock);
netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +535,147 @@ scsi_netlink_exit(void)
}
+/*
+ * Exported Interfaces
+ */
+
+/**
+ * scsi_nl_send_transport_msg -
+ * Generic function to send a single message from a SCSI transport to
+ * a single process
+ *
+ * @pid: receiving pid
+ * @hdr: message payload
+ *
+ **/
+void
+scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ const char *fn;
+ char *datab;
+ u32 len, skblen;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ fn = "netlink socket";
+ goto msg_fail;
+ }
+
+ len = NLMSG_SPACE(hdr->msglen);
+ skblen = NLMSG_SPACE(len);
+
+ skb = alloc_skb(skblen, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ fn = "alloc_skb";
+ goto msg_fail;
+ }
+
+ nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ fn = "nlmsg_put";
+ goto msg_fail_skb;
+ }
+ datab = NLMSG_DATA(nlh);
+ memcpy(datab, hdr, hdr->msglen);
+
+ err = nlmsg_unicast(scsi_nl_sock, skb, pid);
+ if (err < 0) {
+ fn = "nlmsg_unicast";
+ /* nlmsg_unicast already kfree_skb'd */
+ goto msg_fail;
+ }
+
+ return;
+
+msg_fail_skb:
+ kfree_skb(skb);
+msg_fail:
+ printk(KERN_WARNING
+ "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
+ "msglen %d: %s : err %d\n",
+ __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
+ fn, err);
+ return;
+}
+EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
+
+
+/**
+ * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
+ * to a specific process id.
+ *
+ * @pid: process id of the receiver
+ * @host_no: host # sending the message
+ * @vendor_id: unique identifier for the driver's vendor
+ * @data_len: amount, in bytes, of vendor unique payload data
+ * @data_buf: pointer to vendor unique data buffer
+ *
+ * Returns:
+ * 0 on successful return
+ * otherwise, failing error code
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+int
+scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
+ char *data_buf, u32 data_len)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct scsi_nl_host_vendor_msg *msg;
+ u32 len, skblen;
+ int err;
+
+ if (!scsi_nl_sock) {
+ err = -ENOENT;
+ goto send_vendor_fail;
+ }
+
+ len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
+ skblen = NLMSG_SPACE(len);
+
+ skb = alloc_skb(skblen, GFP_KERNEL);
+ if (!skb) {
+ err = -ENOBUFS;
+ goto send_vendor_fail;
+ }
+
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
+ skblen - sizeof(*nlh), 0);
+ if (!nlh) {
+ err = -ENOBUFS;
+ goto send_vendor_fail_skb;
+ }
+ msg = NLMSG_DATA(nlh);
+
+ INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
+ SCSI_NL_SHOST_VENDOR, len);
+ msg->vendor_id = vendor_id;
+ msg->host_no = host_no;
+ msg->vmsg_datalen = data_len; /* bytes */
+ memcpy(&msg[1], data_buf, data_len);
+
+ err = nlmsg_unicast(scsi_nl_sock, skb, pid);
+ if (err)
+ /* nlmsg_multicast already kfree_skb'd */
+ goto send_vendor_fail;
+
+ return 0;
+
+send_vendor_fail_skb:
+ kfree_skb(skb);
+send_vendor_fail:
+ printk(KERN_WARNING
+ "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
+ __func__, host_no, err);
+ return err;
+}
+EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
+
+
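
The netlink plumbing added above is consumed by LLDDs through scsi_nl_add_driver()/scsi_nl_remove_driver() and by user space through SCSI_NL_SHOST_VENDOR messages. A hedged sketch of the driver side, using the signatures introduced in this patch; the vendor id, host template and handler body are placeholders, and the header location is assumed:

#include <linux/init.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_netlink.h>

#define EXAMPLE_VENDOR_ID 0x12345678ULL		/* placeholder value */

extern struct scsi_host_template example_host_template;	/* assumed */

/* Called for SCSI_NL_SHOST_VENDOR messages addressed to this vendor id. */
static int example_nl_msg(struct Scsi_Host *shost, void *payload,
			  u32 len, u32 pid)
{
	/* Parse the vendor-unique payload here. */
	return 0;
}

static int __init example_register(void)
{
	/* NULL event handler: only vendor messages are of interest. */
	return scsi_nl_add_driver(EXAMPLE_VENDOR_ID, &example_host_template,
				  example_nl_msg, NULL);
}

static void __exit example_unregister(void)
{
	scsi_nl_remove_driver(EXAMPLE_VENDOR_ID);
}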
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f751120..6cddd5dd323 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
#include <linux/device.h>
struct request_queue;
+struct request;
struct scsi_cmnd;
struct scsi_device;
struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
-extern void __scsi_done(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
/* scsi_error.c */
-extern void scsi_add_timer(struct scsi_cmnd *, int,
- void (*)(struct scsi_cmnd *));
-extern int scsi_delete_timer(struct scsi_cmnd *);
-extern void scsi_times_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf..82f7b2dd08a 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
- if (IS_ERR(shost))
- return PTR_ERR(shost);
+ if (!shost)
+ return error;
if (shost->transportt->user_scan)
error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
int error = -ENXIO;
shost = scsi_host_lookup(host);
- if (IS_ERR(shost))
- return PTR_ERR(shost);
+ if (!shost)
+ return error;
sdev = scsi_device_lookup(shost, channel, id, lun);
if (sdev) {
scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 34d0de6cd51..334862e26a1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
int *bflags, int async)
{
+ int ret;
+
/*
* XXX do not save the inquiry, since it can change underneath us,
* save just vendor/model/rev.
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* set the device running here so that slave configure
* may do I/O */
- scsi_device_set_state(sdev, SDEV_RUNNING);
+ ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (ret) {
+ ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev,
+ "in wrong state %s to complete scan\n",
+ scsi_device_state_name(sdev->sdev_state));
+ return SCSI_SCAN_NO_RESPONSE;
+ }
+ }
if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
if (sdev->host->hostt->slave_configure) {
- int ret = sdev->host->hostt->slave_configure(sdev);
+ ret = sdev->host->hostt->slave_configure(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
*/
sdev = scsi_device_lookup_by_target(starget, lun);
if (sdev) {
- if (rescan || sdev->sdev_state != SDEV_CREATED) {
+ if (rescan || !scsi_device_created(sdev)) {
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
"scsi scan: device exists on %s\n",
sdev->sdev_gendev.bus_id));
@@ -1467,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
kfree(lun_data);
out:
scsi_device_put(sdev);
- if (sdev->sdev_state == SDEV_CREATED)
+ if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be..93c28f30bbd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
{ SDEV_QUIESCE, "quiesce" },
{ SDEV_OFFLINE, "offline" },
{ SDEV_BLOCK, "blocked" },
+ { SDEV_CREATED_BLOCK, "created-blocked" },
};
const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
+/*
+ * TODO: can we make these symlinks to the block layer ones?
+ */
static ssize_t
sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev;
sdev = to_scsi_device(dev);
- return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
+ return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
}
static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
int timeout;
sdev = to_scsi_device(dev);
sscanf (buf, "%d\n", &timeout);
- sdev->timeout = timeout * HZ;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
return count;
}
static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39a..48ba413f7f6 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
int err;
dprintk("%lx %u\n", uaddr, len);
- err = blk_rq_map_user(q, rq, (void *)uaddr, len);
+ err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
if (err) {
/*
* TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
/* TODO: replace with a O(1) alg */
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return -EINVAL;
}
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "Could not find host no %d\n", host_no);
return err;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb8..d5f7653bb94 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
-
-/*
- * This is a temporary carrier for creating a vport. It will eventually
- * be replaced by a real message definition for sgio or netlink.
- *
- * fc_vport_identifiers: This set of data contains all elements
- * to uniquely identify and instantiate a FC virtual port.
- *
- * Notes:
- * symbolic_name: The driver is to append the symbolic_name string data
- * to the symbolic_node_name data that it generates by default.
- * the resulting combination should then be registered with the switch.
- * It is expected that things like Xen may stuff a VM title into
- * this field.
- */
-struct fc_vport_identifiers {
- u64 node_name;
- u64 port_name;
- u32 roles;
- bool disable;
- enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
- char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
-};
-
-static int fc_vport_create(struct Scsi_Host *shost, int channel,
+static int fc_vport_setup(struct Scsi_Host *shost, int channel,
struct device *pdev, struct fc_vport_identifiers *ids,
struct fc_vport **vport);
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
vid.disable = false; /* always enabled */
/* we only allow support on Channel 0 !!! */
- stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
+ stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
return stat ? stat : count;
}
static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
* Notes:
* This routine assumes no locks are held on entry.
*/
-static enum scsi_eh_timer_return
+static enum blk_eh_timer_return
fc_timed_out(struct scsi_cmnd *scmd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
if (rport->port_state == FC_PORTSTATE_BLOCKED)
- return EH_RESET_TIMER;
+ return BLK_EH_RESET_TIMER;
- return EH_NOT_HANDLED;
+ return BLK_EH_NOT_HANDLED;
}
/*
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
/**
- * fc_vport_create - allocates and creates a FC virtual port.
+ * fc_vport_setup - allocates and creates a FC virtual port.
* @shost: scsi host the virtual port is connected to.
* @channel: Channel on shost port connected to.
* @pdev: parent device for vport
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
* This routine assumes no locks are held on entry.
*/
static int
-fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
+fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
{
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3207,28 @@ delete_vport:
return error;
}
+/**
+ * fc_vport_create - Admin App or LLDD requests creation of a vport
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: channel on shost port connected to.
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ */
+struct fc_vport *
+fc_vport_create(struct Scsi_Host *shost, int channel,
+ struct fc_vport_identifiers *ids)
+{
+ int stat;
+ struct fc_vport *vport;
+
+ stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
+ ids, &vport);
+ return stat ? NULL : vport;
+}
+EXPORT_SYMBOL(fc_vport_create);
/**
* fc_vport_terminate - Admin App or LLDD requests termination of a vport
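
fc_vport_create() becomes an exported entry point that LLDDs can call directly; the NPIV code added to qla_sup.c earlier in this patch is the in-tree user. A condensed sketch of the calling convention it establishes (the WWN arguments are placeholders):

#include <linux/string.h>
#include <scsi/scsi_transport_fc.h>

/* Sketch: create an NPIV initiator vport on channel 0 of @shost. */
static struct fc_vport *example_make_vport(struct Scsi_Host *shost,
					   u64 wwpn, u64 wwnn)
{
	struct fc_vport_identifiers vid;

	memset(&vid, 0, sizeof(vid));
	vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vid.vport_type = FC_PORTTYPE_NPIV;
	vid.disable = false;
	vid.port_name = wwpn;
	vid.node_name = wwnn;

	return fc_vport_create(shost, 0, &vid);	/* NULL on failure */
}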
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164..0ce5f7cdfe2 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
return -EINVAL;
shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "target discovery could not find host no %u\n",
ev->u.tgt_dscvr.host_no);
return -ENODEV;
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENOSYS;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
- if (IS_ERR(shost)) {
+ if (!shost) {
printk(KERN_ERR "set_host_param could not find host no %u\n",
ev->u.set_host_param.host_no);
return -ENODEV;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d785645..a7b53be6336 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/string_helpers.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define SD_MINORS 16
+#else
+#define SD_MINORS 0
+#endif
+
static int sd_revalidate_disk(struct gendisk *);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
}
- sd_revalidate_disk(sdkp->disk);
+ revalidate_disk(sdkp->disk);
return count;
}
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
- unsigned int timeout = sdp->timeout;
int ret;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = sdp->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = SD_MAX_RETRIES;
- SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev)
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
if (sdkp) {
- sd_revalidate_disk(sdkp->disk);
+ revalidate_disk(sdkp->disk);
scsi_disk_put(sdkp);
}
}
@@ -1429,27 +1434,21 @@ got_data:
*/
sector_size = 512;
}
+ blk_queue_hardsect_size(sdp->request_queue, sector_size);
+
{
- /*
- * The msdos fs needs to know the hardware sector size
- * So I have created this table. See ll_rw_blk.c
- * Jacques Gelinas (Jacques@solucorp.qc.ca)
- */
- int hard_sector = sector_size;
- sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
- struct request_queue *queue = sdp->request_queue;
- sector_t mb = sz;
+ char cap_str_2[10], cap_str_10[10];
+ u64 sz = sdkp->capacity << ffz(~sector_size);
- blk_queue_hardsect_size(queue, hard_sector);
- /* avoid 64-bit division on 32-bit platforms */
- sector_div(sz, 625);
- mb -= sz - 974;
- sector_div(mb, 1950);
+ string_get_size(sz, STRING_UNITS_2, cap_str_2,
+ sizeof(cap_str_2));
+ string_get_size(sz, STRING_UNITS_10, cap_str_10,
+ sizeof(cap_str_10));
sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte hardware sectors (%llu MB)\n",
+ "%llu %d-byte hardware sectors: (%s/%s)\n",
(unsigned long long)sdkp->capacity,
- hard_sector, (unsigned long long)mb);
+ sector_size, cap_str_10, cap_str_2);
}
/* Rescale capacity to 512-byte units */
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
}
/**
+ * sd_format_disk_name - format disk name
+ * @prefix: name prefix - e.g. "sd" for SCSI disks
+ * @index: index of the disk to format name for
+ * @buf: output buffer
+ * @buflen: length of the output buffer
+ *
+ * SCSI disk names start at sda. The 26th device is sdz and the
+ * 27th is sdaa. The last two-letter suffix is sdzz, which is
+ * followed by sdaaa.
+ *
+ * This is basically base-26 counting with one extra 'nil' entry
+ * at the beginning of every digit except the first, so the name
+ * can be computed like an ordinary base-26 conversion with the
+ * index shifted down by one after each digit is produced.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
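A standalone userspace sketch of the same base-26 loop makes the mapping easy to verify (0 -> sda, 25 -> sdz, 26 -> sdaa, 701 -> sdzz, 702 -> sdaaa); the helper name here is illustrative, not the kernel symbol.

/* Userspace sketch of the sd_format_disk_name() loop above. */
#include <stdio.h>
#include <string.h>

static int format_disk_name(const char *prefix, int index,
			    char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;	/* 26 letters */
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	*p = '\0';
	do {
		if (p == begin)
			return -1;	/* buffer too small */
		*--p = 'a' + (index % base);
		/* shift down by one so 'a' doubles as the nil digit */
		index = (index / base) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));
	return 0;
}

int main(void)
{
	int samples[] = { 0, 25, 26, 701, 702 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (!format_disk_name("sd", samples[i], name, sizeof(name)))
			printf("%d -> %s\n", samples[i], name);
	}
	return 0;	/* prints sda, sdz, sdaa, sdzz, sdaaa */
}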
+
+/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
* for each scsi device (not just disks) present.
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = alloc_disk(16);
+ gd = alloc_disk(SD_MINORS);
if (!gd)
goto out_free;
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev)
if (error)
goto out_put;
- error = -EBUSY;
- if (index >= SD_MAX_DISKS)
+ error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
+ if (error)
goto out_free_index;
sdkp->device = sdp;
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev)
sdkp->openers = 0;
sdkp->previous_state = 1;
- if (!sdp->timeout) {
+ if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
- sdp->timeout = SD_TIMEOUT;
+ blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
else
- sdp->timeout = SD_MOD_TIMEOUT;
+ blk_queue_rq_timeout(sdp->request_queue,
+ SD_MOD_TIMEOUT);
}
device_initialize(&sdkp->dev);
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev)
get_device(&sdp->sdev_gendev);
- gd->major = sd_major((index & 0xf0) >> 4);
- gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
- gd->minors = 16;
- gd->fops = &sd_fops;
-
- if (index < 26) {
- sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
- } else if (index < (26 + 1) * 26) {
- sprintf(gd->disk_name, "sd%c%c",
- 'a' + index / 26 - 1,'a' + index % 26);
- } else {
- const unsigned int m1 = (index / 26 - 1) / 26 - 1;
- const unsigned int m2 = (index / 26 - 1) % 26;
- const unsigned int m3 = index % 26;
- sprintf(gd->disk_name, "sd%c%c%c",
- 'a' + m1, 'a' + m2, 'a' + m3);
+ if (index < SD_MAX_DISKS) {
+ gd->major = sd_major((index & 0xf0) >> 4);
+ gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
+ gd->minors = SD_MINORS;
}
-
+ gd->fops = &sd_fops;
gd->private_data = &sdkp->driver;
gd->queue = sdkp->device->request_queue;
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev)
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
gd->driverfs_dev = &sdp->sdev_gendev;
- gd->flags = GENHD_FL_DRIVERFS;
+ gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650..ba9b9bbd4e7 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
-#include <linux/scatterlist.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
-#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
#define SG_MAX_DEVS 32768
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
unsigned bufflen; /* Size of (aggregate) data buffer */
- unsigned b_malloc_len; /* actual len malloc'ed in buffer */
- struct scatterlist *buffer;/* scatter list */
+ struct page **pages;
+ int page_order;
char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
char orphan; /* 1 -> drop on sight, 0 -> normal */
char sg_io_owned; /* 1 -> packet belongs to SG_IO */
volatile char done; /* 0->before bh, 1->before read, 2->read */
+ struct request *rq;
+ struct bio *bio;
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
-static void sg_cmd_done(void *data, char *sense, int result, int resid);
-static int sg_start_req(Sg_request * srp);
+static void sg_rq_end_io(struct request *rq, int uptodate);
+static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
-static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
- int wr_xf, int *countp, unsigned char __user **up);
-static int sg_write_xfer(Sg_request * srp);
-static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
-static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
-static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
-static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
err = -EFAULT;
goto err_out;
}
- err = sg_read_xfer(srp);
- err_out:
+err_out:
sg_finish_rem_req(srp);
return (0 == err) ? count : err;
}
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- hp->dxferp = (char __user *)buf + cmd_size;
+ if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ hp->dxferp = (char __user *)buf + cmd_size;
+ else
+ hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if ((k = sg_start_req(srp))) {
+ k = sg_start_req(srp, cmnd);
+ if (k) {
SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
- if ((k = sg_write_xfer(srp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
- sg_finish_rem_req(srp);
- return k;
- }
if (sdp->detached) {
sg_finish_rem_req(srp);
return -ENODEV;
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
break;
}
hp->duration = jiffies_to_msecs(jiffies);
-/* Now send everything of to mid-level. The next time we hear about this
- packet is when sg_cmd_done() is called (i.e. a callback). */
- if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
- hp->dxfer_len, srp->data.k_use_sg, timeout,
- SG_DEFAULT_RETRIES, srp, sg_cmd_done,
- GFP_ATOMIC)) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
- /*
- * most likely out of mem, but could also be a bad map
- */
- sg_finish_rem_req(srp);
- return -ENOMEM;
- } else
- return 0;
+
+ srp->rq->timeout = timeout;
+ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+ srp->rq, 1, sg_rq_end_io);
+ return 0;
}
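The rewritten submission path above no longer uses scsi_execute_async(): the SCSI CDB is carried by a struct request, the sg_request pointer rides in rq->end_io_data, and blk_execute_rq_nowait() hands the request to the block layer so that sg_rq_end_io() runs when the command completes. A generic sketch of that submit-and-callback pattern, using placeholder names (my_done, submit_async) rather than sg's own, would be:

/* Minimal sketch of the request-based async pattern; not sg code. */
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/string.h>

static void my_done(struct request *rq, int uptodate)
{
	/* rq->end_io_data carries the context set at submit time;
	 * rq->errors and rq->sense describe the outcome. */
	blk_put_request(rq);
}

static int submit_async(struct request_queue *q, struct gendisk *disk,
			unsigned char *cdb, unsigned int cdb_len, void *ctx)
{
	struct request *rq = blk_get_request(q, READ, GFP_ATOMIC);

	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI pass-through */
	memcpy(rq->cmd, cdb, cdb_len);		/* cdb_len <= BLK_MAX_CDB */
	rq->cmd_len = cdb_len;
	rq->end_io_data = ctx;			/* handed back in my_done() */
	rq->timeout = 60 * HZ;			/* illustrative timeout */

	/* queue it and return; my_done() fires on completion */
	blk_execute_rq_nowait(q, disk, rq, 1, my_done);
	return 0;
}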
static int
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Sg_fd *sfp;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
- struct scatterlist *sg;
- int k;
+ int k, length;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
return VM_FAULT_SIGBUS;
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
- sg = rsv_schp->buffer;
sa = vma->vm_start;
- for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
- ++k, sg = sg_next(sg)) {
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
- len = (len < sg->length) ? len : sg->length;
+ len = (len < length) ? len : length;
if (offset < len) {
- struct page *page;
- page = virt_to_page(page_address(sg_page(sg)) + offset);
+ struct page *page = nth_page(rsv_schp->pages[k],
+ offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
Sg_fd *sfp;
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
- int k;
- struct scatterlist *sg;
+ int k, length;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return -ENOMEM; /* cannot map more than reserved buffer */
sa = vma->vm_start;
- sg = rsv_schp->buffer;
- for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
- ++k, sg = sg_next(sg)) {
+ length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
len = vma->vm_end - sa;
- len = (len < sg->length) ? len : sg->length;
+ len = (len < length) ? len : length;
sa += len;
}
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-/* This function is a "bottom half" handler that is called by the
- * mid level when a command is completed (or has failed). */
-static void
-sg_cmd_done(void *data, char *sense, int result, int resid)
+/*
+ * This function is a "bottom half" handler that is called by the mid
+ * level when a command is completed (or has failed).
+ */
+static void sg_rq_end_io(struct request *rq, int uptodate)
{
- Sg_request *srp = data;
+ struct sg_request *srp = rq->end_io_data;
Sg_device *sdp = NULL;
Sg_fd *sfp;
unsigned long iflags;
unsigned int ms;
+ char *sense;
+ int result, resid;
if (NULL == srp) {
printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
return;
}
+ sense = rq->sense;
+ result = rq->errors;
+ resid = rq->data_len;
SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
if (0 != result) {
struct scsi_sense_hdr sshdr;
- memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
srp->header.msg_status = msg_byte(result);
@@ -1634,37 +1617,79 @@ exit_sg(void)
idr_destroy(&sg_index_idr);
}
-static int
-sg_start_req(Sg_request * srp)
+static int sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
+ struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
+ unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
+ struct request_queue *q = sfp->parentdp->device->request_queue;
+ struct rq_map_data *md, map_data;
+ int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
+
+ SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
+ dxfer_len));
+
+ rq = blk_get_request(q, rw, GFP_ATOMIC);
+ if (!rq)
+ return -ENOMEM;
+
+ memcpy(rq->cmd, cmd, hp->cmd_len);
+
+ rq->cmd_len = hp->cmd_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ srp->rq = rq;
+ rq->end_io_data = srp;
+ rq->sense = srp->sense_b;
+ rq->retries = SG_DEFAULT_RETRIES;
- SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
- if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
- (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
- (!sfp->parentdp->device->host->unchecked_isa_dma)) {
- res = sg_build_direct(srp, sfp, dxfer_len);
- if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
- return res;
- }
- if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
- sg_link_reserve(sfp, srp, dxfer_len);
- else {
- res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res) {
- sg_remove_scat(req_schp);
- return res;
+
+ if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
+ dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
+ !sfp->parentdp->device->host->unchecked_isa_dma &&
+ blk_rq_aligned(q, hp->dxferp, dxfer_len))
+ md = NULL;
+ else
+ md = &map_data;
+
+ if (md) {
+ if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ sg_link_reserve(sfp, srp, dxfer_len);
+ else {
+ res = sg_build_indirect(req_schp, sfp, dxfer_len);
+ if (res)
+ return res;
}
+
+ md->pages = req_schp->pages;
+ md->page_order = req_schp->page_order;
+ md->nr_entries = req_schp->k_use_sg;
}
- return 0;
+
+ if (iov_count)
+ res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
+ hp->dxfer_len, GFP_ATOMIC);
+ else
+ res = blk_rq_map_user(q, rq, md, hp->dxferp,
+ hp->dxfer_len, GFP_ATOMIC);
+
+ if (!res) {
+ srp->bio = rq->bio;
+
+ if (!md) {
+ req_schp->dio_in_use = 1;
+ hp->info |= SG_INFO_DIRECT_IO;
+ }
+ }
+ return res;
}
static void
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
sg_unlink_reserve(sfp, srp);
else
sg_remove_scat(req_schp);
+
+ if (srp->rq) {
+ if (srp->bio)
+ blk_rq_unmap_user(srp->bio);
+
+ blk_put_request(srp->rq);
+ }
+
sg_remove_request(sfp, srp);
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
- int sg_bufflen = tablesize * sizeof(struct scatterlist);
+ int sg_bufflen = tablesize * sizeof(struct page *);
gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
- /*
- * TODO: test without low_dma, we should not need it since
- * the block layer will bounce the buffer for us
- *
- * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
- */
- if (sfp->low_dma)
- gfp_flags |= GFP_DMA;
- schp->buffer = kzalloc(sg_bufflen, gfp_flags);
- if (!schp->buffer)
+ schp->pages = kzalloc(sg_bufflen, gfp_flags);
+ if (!schp->pages)
return -ENOMEM;
- sg_init_table(schp->buffer, tablesize);
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
-#ifdef SG_ALLOW_DIO_CODE
-/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
- /* TODO: hopefully we can use the generic block layer code */
-
-/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
- - mapping of all pages not successful
- (i.e., either completely successful or fails)
-*/
-static int
-st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
- unsigned long uaddr, size_t count, int rw)
-{
- unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int res, i, j;
- struct page **pages;
-
- /* User attempted Overflow! */
- if ((uaddr + count) < uaddr)
- return -EINVAL;
-
- /* Too big */
- if (nr_pages > max_pages)
- return -ENOMEM;
-
- /* Hmm? */
- if (count == 0)
- return 0;
-
- if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
- return -ENOMEM;
-
- /* Try to fault in all of the necessary pages */
- down_read(&current->mm->mmap_sem);
- /* rw==READ means read from drive, write into memory area */
- res = get_user_pages(
- current,
- current->mm,
- uaddr,
- nr_pages,
- rw == READ,
- 0, /* don't force */
- pages,
- NULL);
- up_read(&current->mm->mmap_sem);
-
- /* Errors and no page mapped should return here */
- if (res < nr_pages)
- goto out_unmap;
-
- for (i=0; i < nr_pages; i++) {
- /* FIXME: flush superflous for rw==READ,
- * probably wrong function for rw==WRITE
- */
- flush_dcache_page(pages[i]);
- /* ?? Is locking needed? I don't think so */
- /* if (!trylock_page(pages[i]))
- goto out_unlock; */
- }
-
- sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
- if (nr_pages > 1) {
- sgl[0].length = PAGE_SIZE - sgl[0].offset;
- count -= sgl[0].length;
- for (i=1; i < nr_pages ; i++)
- sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
- }
- else {
- sgl[0].length = count;
- }
-
- kfree(pages);
- return nr_pages;
-
- out_unmap:
- if (res > 0) {
- for (j=0; j < res; j++)
- page_cache_release(pages[j]);
- res = 0;
- }
- kfree(pages);
- return res;
-}
-
-
-/* And unmap them... */
-static int
-st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
- int dirtied)
-{
- int i;
-
- for (i=0; i < nr_pages; i++) {
- struct page *page = sg_page(&sgl[i]);
-
- if (dirtied)
- SetPageDirty(page);
- /* unlock_page(page); */
- /* FIXME: cache flush missing for rw==READ
- * FIXME: call the correct reference counting function
- */
- page_cache_release(page);
- }
-
- return 0;
-}
-
-/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
-#endif
-
-
-/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
-static int
-sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
-{
-#ifdef SG_ALLOW_DIO_CODE
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- int sg_tablesize = sfp->parentdp->sg_tablesize;
- int mx_sc_elems, res;
- struct scsi_device *sdev = sfp->parentdp->device;
-
- if (((unsigned long)hp->dxferp &
- queue_dma_alignment(sdev->request_queue)) != 0)
- return 1;
-
- mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
- if (mx_sc_elems <= 0) {
- return 1;
- }
- res = st_map_user_pages(schp->buffer, mx_sc_elems,
- (unsigned long)hp->dxferp, dxfer_len,
- (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
- if (res <= 0) {
- sg_remove_scat(schp);
- return 1;
- }
- schp->k_use_sg = res;
- schp->dio_in_use = 1;
- hp->info |= SG_INFO_DIRECT_IO;
- return 0;
-#else
- return 1;
-#endif
-}
-
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
- struct scatterlist *sg;
- int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
+ int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
int sg_tablesize = sfp->parentdp->sg_tablesize;
- int blk_size = buff_size;
- struct page *p = NULL;
+ int blk_size = buff_size, order;
+ gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
if (blk_size < 0)
return -EFAULT;
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
} else
scatter_elem_sz_prev = num;
}
- for (k = 0, sg = schp->buffer, rem_sz = blk_size;
- (rem_sz > 0) && (k < mx_sc_elems);
- ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
-
+
+ if (sfp->low_dma)
+ gfp_mask |= GFP_DMA;
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ gfp_mask |= __GFP_ZERO;
+
+ order = get_order(num);
+retry:
+ ret_sz = 1 << (PAGE_SHIFT + order);
+
+ for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
+ k++, rem_sz -= ret_sz) {
+
num = (rem_sz > scatter_elem_sz_prev) ?
- scatter_elem_sz_prev : rem_sz;
- p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
- if (!p)
- return -ENOMEM;
+ scatter_elem_sz_prev : rem_sz;
+
+ schp->pages[k] = alloc_pages(gfp_mask, order);
+ if (!schp->pages[k])
+ goto out;
if (num == scatter_elem_sz_prev) {
if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
scatter_elem_sz_prev = ret_sz;
}
}
- sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
"ret_sz=%d\n", k, num, ret_sz));
} /* end of for loop */
+ schp->page_order = order;
schp->k_use_sg = k;
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
"rem_sz=%d\n", k, rem_sz));
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
return -ENOMEM;
-
return 0;
-}
-
-static int
-sg_write_xfer(Sg_request * srp)
-{
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
- int num_xfer = 0;
- int j, k, onum, usglen, ksglen, res;
- int iovec_count = (int) hp->iovec_count;
- int dxfer_dir = hp->dxfer_direction;
- unsigned char *p;
- unsigned char __user *up;
- int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
-
- if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
- (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
- num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
- if (schp->bufflen < num_xfer)
- num_xfer = schp->bufflen;
- }
- if ((num_xfer <= 0) || (schp->dio_in_use) ||
- (new_interface
- && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
- return 0;
-
- SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
- num_xfer, iovec_count, schp->k_use_sg));
- if (iovec_count) {
- onum = iovec_count;
- if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
- return -EFAULT;
- } else
- onum = 1;
-
- ksglen = sg->length;
- p = page_address(sg_page(sg));
- for (j = 0, k = 0; j < onum; ++j) {
- res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
- if (res)
- return res;
-
- for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg_page(sg))) {
- if (usglen <= 0)
- break;
- if (ksglen > usglen) {
- if (usglen >= num_xfer) {
- if (__copy_from_user(p, up, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_from_user(p, up, usglen))
- return -EFAULT;
- p += usglen;
- ksglen -= usglen;
- break;
- } else {
- if (ksglen >= num_xfer) {
- if (__copy_from_user(p, up, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_from_user(p, up, ksglen))
- return -EFAULT;
- up += ksglen;
- usglen -= ksglen;
- }
- ++k;
- if (k >= schp->k_use_sg)
- return 0;
- }
- }
-
- return 0;
-}
+out:
+ for (i = 0; i < k; i++)
+ __free_pages(schp->pages[i], order);
-static int
-sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
- int wr_xf, int *countp, unsigned char __user **up)
-{
- int num_xfer = (int) hp->dxfer_len;
- unsigned char __user *p = hp->dxferp;
- int count;
+ if (--order >= 0)
+ goto retry;
- if (0 == sg_num) {
- if (wr_xf && ('\0' == hp->interface_id))
- count = (int) hp->flags; /* holds "old" input_size */
- else
- count = num_xfer;
- } else {
- sg_iovec_t iovec;
- if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
- return -EFAULT;
- p = iovec.iov_base;
- count = (int) iovec.iov_len;
- }
- if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
- return -EFAULT;
- if (up)
- *up = p;
- if (countp)
- *countp = count;
- return 0;
+ return -ENOMEM;
}
static void
sg_remove_scat(Sg_scatter_hold * schp)
{
SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
- if (schp->buffer && (schp->sglist_len > 0)) {
- struct scatterlist *sg = schp->buffer;
-
- if (schp->dio_in_use) {
-#ifdef SG_ALLOW_DIO_CODE
- st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
-#endif
- } else {
+ if (schp->pages && schp->sglist_len > 0) {
+ if (!schp->dio_in_use) {
int k;
- for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
- ++k, sg = sg_next(sg)) {
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
SCSI_LOG_TIMEOUT(5, printk(
- "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
- k, sg_page(sg), sg->length));
- sg_page_free(sg_page(sg), sg->length);
+ "sg_remove_scat: k=%d, pg=0x%p\n",
+ k, schp->pages[k]));
+ __free_pages(schp->pages[k], schp->page_order);
}
- }
- kfree(schp->buffer);
- }
- memset(schp, 0, sizeof (*schp));
-}
-static int
-sg_read_xfer(Sg_request * srp)
-{
- sg_io_hdr_t *hp = &srp->header;
- Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
- int num_xfer = 0;
- int j, k, onum, usglen, ksglen, res;
- int iovec_count = (int) hp->iovec_count;
- int dxfer_dir = hp->dxfer_direction;
- unsigned char *p;
- unsigned char __user *up;
- int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
-
- if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
- || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
- num_xfer = hp->dxfer_len;
- if (schp->bufflen < num_xfer)
- num_xfer = schp->bufflen;
- }
- if ((num_xfer <= 0) || (schp->dio_in_use) ||
- (new_interface
- && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
- return 0;
-
- SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
- num_xfer, iovec_count, schp->k_use_sg));
- if (iovec_count) {
- onum = iovec_count;
- if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
- return -EFAULT;
- } else
- onum = 1;
-
- p = page_address(sg_page(sg));
- ksglen = sg->length;
- for (j = 0, k = 0; j < onum; ++j) {
- res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
- if (res)
- return res;
-
- for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg_page(sg))) {
- if (usglen <= 0)
- break;
- if (ksglen > usglen) {
- if (usglen >= num_xfer) {
- if (__copy_to_user(up, p, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_to_user(up, p, usglen))
- return -EFAULT;
- p += usglen;
- ksglen -= usglen;
- break;
- } else {
- if (ksglen >= num_xfer) {
- if (__copy_to_user(up, p, num_xfer))
- return -EFAULT;
- return 0;
- }
- if (__copy_to_user(up, p, ksglen))
- return -EFAULT;
- up += ksglen;
- usglen -= ksglen;
- }
- ++k;
- if (k >= schp->k_use_sg)
- return 0;
+ kfree(schp->pages);
}
}
-
- return 0;
+ memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
Sg_scatter_hold *schp = &srp->data;
- struct scatterlist *sg = schp->buffer;
int k, num;
SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
if ((!outp) || (num_read_xfer <= 0))
return 0;
- for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
- num = sg->length;
+ num = 1 << (PAGE_SHIFT + schp->page_order);
+ for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(sg_page(sg)),
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(sg_page(sg)),
+ if (__copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
- struct scatterlist *sg = rsv_schp->buffer;
int k, num, rem;
srp->res_used = 1;
SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
rem = size;
- for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
- num = sg->length;
+ num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+ for (k = 0; k < rsv_schp->k_use_sg; k++) {
if (rem <= num) {
- sfp->save_scat_len = num;
- sg->length = rem;
req_schp->k_use_sg = k + 1;
req_schp->sglist_len = rsv_schp->sglist_len;
- req_schp->buffer = rsv_schp->buffer;
+ req_schp->pages = rsv_schp->pages;
req_schp->bufflen = size;
- req_schp->b_malloc_len = rsv_schp->b_malloc_len;
+ req_schp->page_order = rsv_schp->page_order;
break;
} else
rem -= num;
@@ -2208,22 +1911,13 @@ static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
- Sg_scatter_hold *rsv_schp = &sfp->reserve;
SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
(int) req_schp->k_use_sg));
- if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
- struct scatterlist *sg = rsv_schp->buffer;
-
- if (sfp->save_scat_len > 0)
- (sg + (req_schp->k_use_sg - 1))->length =
- (unsigned) sfp->save_scat_len;
- else
- SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
- }
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
- req_schp->buffer = NULL;
+ req_schp->pages = NULL;
+ req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
return srp ? 1 : 0;
}
-/* The size fetched (value output via retSzp) set when non-NULL return */
-static struct page *
-sg_page_malloc(int rqSz, int lowDma, int *retSzp)
-{
- struct page *resp = NULL;
- gfp_t page_mask;
- int order, a_size;
- int resSz;
-
- if ((rqSz <= 0) || (NULL == retSzp))
- return resp;
-
- if (lowDma)
- page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
- else
- page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
-
- for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
- order++, a_size <<= 1) ;
- resSz = a_size; /* rounded up if necessary */
- resp = alloc_pages(page_mask, order);
- while ((!resp) && order) {
- --order;
- a_size >>= 1; /* divide by 2, until PAGE_SIZE */
- resp = alloc_pages(page_mask, order); /* try half */
- resSz = a_size;
- }
- if (resp) {
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- memset(page_address(resp), 0, resSz);
- *retSzp = resSz;
- }
- return resp;
-}
-
-static void
-sg_page_free(struct page *page, int size)
-{
- int order, a_size;
-
- if (!page)
- return;
- for (order = 0, a_size = PAGE_SIZE; a_size < size;
- order++, a_size <<= 1) ;
- __free_pages(page, order);
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def..0f17009c99d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
static int sr_prep_fn(struct request_queue *q, struct request *rq)
{
- int block=0, this_count, s_size, timeout = SR_TIMEOUT;
+ int block = 0, this_count, s_size;
struct scsi_cd *cd;
struct scsi_cmnd *SCpnt;
struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->transfersize = cd->device->sector_size;
SCpnt->underflow = this_count << 9;
SCpnt->allowed = MAX_RETRIES;
- SCpnt->timeout_per_command = timeout;
/*
* This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD;
+ blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
+
cd->device = sdev;
cd->disk = disk;
cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
struct gendisk *disk = cd->disk;
spin_lock(&sr_index_lock);
- clear_bit(disk->first_minor, sr_index_bits);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
spin_unlock(&sr_index_lock);
unregister_cdrom(&cd->cdi);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669..f4e6cde1fd0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
* Shorten our settle_time if needed for
* this command not to time out.
*/
- if (np->s.settle_time_valid && cmd->timeout_per_command) {
- unsigned long tlimit = jiffies + cmd->timeout_per_command;
+ if (np->s.settle_time_valid && cmd->request->timeout) {
+ unsigned long tlimit = jiffies + cmd->request->timeout;
tlimit -= SYM_CONF_TIMER_INTERVAL*2;
if (time_after(np->s.settle_time, tlimit)) {
np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3..69ac6e590f1 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
static int __init dc390_module_init(void)
{
if (!disable_clustering)
- printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n"
- "\twith \"disable_clustering=1\" and report to maintainers\n");
+ printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
+ printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
if (tmscsim[0] == -1 || tmscsim[0] > 15) {
tmscsim[0] = 7;
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 3a6da80b081..61fb8b6d19a 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -131,7 +131,8 @@ struct atmel_uart_char {
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
- unsigned short suspended; /* is port suspended? */
+ int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
+ u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
short use_dma_rx; /* enable PDC receiver */
@@ -984,8 +985,15 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
* This is called on uart_open() or a resume event.
*/
clk_enable(atmel_port->clk);
+
+ /* re-enable interrupts if we disabled some on suspend */
+ UART_PUT_IER(port, atmel_port->backup_imr);
break;
case 3:
+ /* Back up the interrupt mask and disable all interrupts */
+ atmel_port->backup_imr = UART_GET_IMR(port);
+ UART_PUT_IDR(port, -1);
+
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
@@ -1475,13 +1483,12 @@ static int atmel_serial_suspend(struct platform_device *pdev,
cpu_relax();
}
- if (device_may_wakeup(&pdev->dev)
- && !atmel_serial_clk_will_stop())
- enable_irq_wake(port->irq);
- else {
- uart_suspend_port(&atmel_uart, port);
- atmel_port->suspended = 1;
- }
+ /* we can not wake up if we're running on slow clock */
+ atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
+ if (atmel_serial_clk_will_stop())
+ device_set_wakeup_enable(&pdev->dev, 0);
+
+ uart_suspend_port(&atmel_uart, port);
return 0;
}
@@ -1491,11 +1498,8 @@ static int atmel_serial_resume(struct platform_device *pdev)
struct uart_port *port = platform_get_drvdata(pdev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_port->suspended) {
- uart_resume_port(&atmel_uart, port);
- atmel_port->suspended = 0;
- } else
- disable_irq_wake(port->irq);
+ uart_resume_port(&atmel_uart, port);
+ device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
return 0;
}
@@ -1513,6 +1517,8 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE));
port = &atmel_ports[pdev->id];
+ port->backup_imr = 0;
+
atmel_init_port(port, pdev);
if (!atmel_use_dma_rx(&port->uart)) {
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
index c4eaacd6e55..b872bfaf4bd 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/orion_spi.c
@@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m)
goto msg_rejected;
}
- if (t->speed_hz < orion_spi->min_speed) {
+ if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
dev_err(&spi->dev,
"message rejected : "
"device min speed (%d Hz) exceeds "
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 0e53354c1cf..d47d3636227 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -49,7 +49,7 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
-#define IS_DMA_ALIGNED(x) (((x) & 0x07) == 0)
+#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
#define MAX_DMA_LEN 8191
/*
@@ -896,7 +896,7 @@ static void pump_transfers(unsigned long data)
|| transfer->rx_dma || transfer->tx_dma) {
dev_err(&drv_data->pdev->dev,
"pump_transfers: mapped transfer length "
- "of %lu is greater than %d\n",
+ "of %u is greater than %d\n",
transfer->len, MAX_DMA_LEN);
message->status = -EINVAL;
giveback(drv_data);
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 87ab2443e66..0ffabf5c0b6 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -471,6 +471,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
#endif
break;
case SSB_BUSTYPE_SSB:
+ dev->dma_mask = &dev->coherent_dma_mask;
break;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8abd4e59bf4..8ab389dca2b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1876,7 +1876,8 @@ int usb_add_hcd(struct usb_hcd *hcd,
* with IRQF_SHARED. As usb_hcd_irq() will always disable
* interrupts we can remove it here.
*/
- irqflags &= ~IRQF_DISABLED;
+ if (irqflags & IRQF_SHARED)
+ irqflags &= ~IRQF_DISABLED;
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6a5cb018383..d99963873e3 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2683,35 +2683,17 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
USB_PORT_STAT_C_ENABLE);
#endif
- /* Try to use the debounce delay for protection against
- * port-enable changes caused, for example, by EMI.
- */
- if (portchange & (USB_PORT_STAT_C_CONNECTION |
- USB_PORT_STAT_C_ENABLE)) {
- status = hub_port_debounce(hub, port1);
- if (status < 0) {
- if (printk_ratelimit())
- dev_err (hub_dev, "connect-debounce failed, "
- "port %d disabled\n", port1);
- portstatus &= ~USB_PORT_STAT_CONNECTION;
- } else {
- portstatus = status;
- }
- }
-
/* Try to resuscitate an existing device */
udev = hdev->children[port1-1];
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
-
usb_lock_device(udev);
if (portstatus & USB_PORT_STAT_ENABLE) {
status = 0; /* Nothing to do */
- } else if (!udev->persist_enabled) {
- status = -ENODEV; /* Mustn't resuscitate */
#ifdef CONFIG_USB_SUSPEND
- } else if (udev->state == USB_STATE_SUSPENDED) {
+ } else if (udev->state == USB_STATE_SUSPENDED &&
+ udev->persist_enabled) {
/* For a suspended device, treat this as a
* remote wakeup event.
*/
@@ -2726,7 +2708,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
#endif
} else {
- status = usb_reset_device(udev);
+ status = -ENODEV; /* Don't resuscitate */
}
usb_unlock_device(udev);
@@ -2741,6 +2723,19 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_disconnect(&hdev->children[port1-1]);
clear_bit(port1, hub->change_bits);
+ if (portchange & (USB_PORT_STAT_C_CONNECTION |
+ USB_PORT_STAT_C_ENABLE)) {
+ status = hub_port_debounce(hub, port1);
+ if (status < 0) {
+ if (printk_ratelimit())
+ dev_err(hub_dev, "connect-debounce failed, "
+ "port %d disabled\n", port1);
+ portstatus &= ~USB_PORT_STAT_CONNECTION;
+ } else {
+ portstatus = status;
+ }
+ }
+
/* Return now if debouncing failed or nothing is connected */
if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
@@ -2748,7 +2743,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
&& !(portstatus & (1 << USB_PORT_FEAT_POWER)))
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
-
+
if (portstatus & USB_PORT_STAT_ENABLE)
goto done;
return;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 1cfccf102a2..45ad556169f 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -223,7 +223,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
fsl_writel(tmp, &dr_regs->endpointlistaddr);
VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
- (int)udc->ep_qh, (int)tmp,
+ udc->ep_qh, (int)tmp,
fsl_readl(&dr_regs->endpointlistaddr));
/* Config PHY interface */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 574c53831a0..bb54cca4c54 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -787,7 +787,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
omap_set_dma_dest_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
- (unsigned long) io_v2p(UDC_DATA_DMA),
+ UDC_DATA_DMA,
0, 0);
}
} else {
@@ -804,7 +804,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
omap_set_dma_src_params(ep->lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
- (unsigned long) io_v2p(UDC_DATA_DMA),
+ UDC_DATA_DMA,
0, 0);
/* EMIFF or SDRC */
omap_set_dma_dest_burst_mode(ep->lch,
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d9d53f289ca..8409e0705d6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -145,16 +145,6 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
return -ETIMEDOUT;
}
-static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
-{
- int error = handshake(ehci, ptr, mask, done, usec);
- if (error)
- ehci_to_hcd(ehci)->state = HC_STATE_HALT;
-
- return error;
-}
-
/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
@@ -173,6 +163,22 @@ static int ehci_halt (struct ehci_hcd *ehci)
STS_HALT, STS_HALT, 16 * 125);
}
+static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ int error;
+
+ error = handshake(ehci, ptr, mask, done, usec);
+ if (error) {
+ ehci_halt(ehci);
+ ehci_to_hcd(ehci)->state = HC_STATE_HALT;
+ ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n",
+ ptr, mask, done, error);
+ }
+
+ return error;
+}
+
/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index f9575c40912..9c32063a0c2 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -1,7 +1,7 @@
/*
* IXP4XX EHCI Host Controller Driver
*
- * Author: Vladimir Barinov <vbarinov@ru.mvista.com>
+ * Author: Vladimir Barinov <vbarinov@embeddedalley.com>
*
* Based on "ehci-fsl.c" by Randy Vinson <rvinson@mvista.com>
*
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b7853c8bac0..4a0c5a78b2e 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -437,6 +437,9 @@ static int enable_periodic (struct ehci_hcd *ehci)
u32 cmd;
int status;
+ if (ehci->periodic_sched++)
+ return 0;
+
/* did clearing PSE take effect yet?
* takes effect only at frame boundaries...
*/
@@ -461,6 +464,9 @@ static int disable_periodic (struct ehci_hcd *ehci)
u32 cmd;
int status;
+ if (--ehci->periodic_sched)
+ return 0;
+
/* did setting PSE not take effect yet?
* takes effect only at frame boundaries...
*/
@@ -544,13 +550,10 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
: (qh->usecs * 8);
/* maybe enable periodic schedule processing */
- if (!ehci->periodic_sched++)
- return enable_periodic (ehci);
-
- return 0;
+ return enable_periodic(ehci);
}
-static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
@@ -586,9 +589,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh_put (qh);
/* maybe turn off periodic schedule */
- ehci->periodic_sched--;
- if (!ehci->periodic_sched)
- (void) disable_periodic (ehci);
+ return disable_periodic(ehci);
}
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -1562,9 +1563,7 @@ itd_link_urb (
urb->hcpriv = NULL;
timer_action (ehci, TIMER_IO_WATCHDOG);
- if (unlikely (!ehci->periodic_sched++))
- return enable_periodic (ehci);
- return 0;
+ return enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
@@ -1642,7 +1641,7 @@ itd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- ehci->periodic_sched--;
+ (void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (unlikely (list_empty (&stream->td_list))) {
@@ -1951,9 +1950,7 @@ sitd_link_urb (
urb->hcpriv = NULL;
timer_action (ehci, TIMER_IO_WATCHDOG);
- if (!ehci->periodic_sched++)
- return enable_periodic (ehci);
- return 0;
+ return enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -2019,7 +2016,7 @@ sitd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- ehci->periodic_sched--;
+ (void) disable_periodic(ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (list_empty (&stream->td_list)) {
@@ -2243,8 +2240,7 @@ restart:
if (unlikely (modified)) {
if (likely(ehci->periodic_sched > 0))
goto restart;
- /* maybe we can short-circuit this scan! */
- disable_periodic(ehci);
+ /* short-circuit this scan */
now_uframe = clock;
break;
}
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index a0017486ad4..58b2b8fc943 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -9,6 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options"
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
depends on (USB || USB_GADGET) && HAVE_CLK
+ depends on !SUPERH
select TWL4030_USB if MACH_OMAP_3430SDP
tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
help
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c5b8f0296fc..128e949db47 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -100,8 +100,8 @@
#include <linux/io.h>
#ifdef CONFIG_ARM
-#include <asm/arch/hardware.h>
-#include <asm/arch/memory.h>
+#include <mach/hardware.h>
+#include <mach/memory.h>
#include <asm/mach-types.h>
#endif
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 298b22e6ad0..9d2dcb121c5 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -35,8 +35,8 @@
#include <linux/io.h>
#include <asm/mach-types.h>
-#include <asm/arch/hardware.h>
-#include <asm/arch/mux.h>
+#include <mach/hardware.h>
+#include <mach/mux.h>
#include "musb_core.h"
#include "omap2430.h"
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 786a62071f7..dc7670718cd 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -11,8 +11,8 @@
#define __MUSB_OMAP243X_H__
#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include <asm/arch/hardware.h>
-#include <asm/arch/usb.h>
+#include <mach/hardware.h>
+#include <mach/usb.h>
/*
* OMAP2430-specific definitions
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 442cba69cce..1279553381e 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -72,6 +72,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
+ { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
@@ -83,6 +84,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
@@ -93,6 +95,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 984f6eff4c4..3dc93b542b3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -654,6 +654,9 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 382265bba96..8a5b6df3a97 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -750,6 +750,7 @@
#define PAPOUCH_VID 0x5050 /* Vendor ID */
#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
+#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
/*
* ACG Identification Technologies GmbH products (http://www.acg.de/).
@@ -838,6 +839,10 @@
/* Rig Expert Ukraine devices */
#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
+/* Domintell products http://www.domintell.com */
+#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
+#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
+
/* Commands */
#define FTDI_SIO_RESET 0 /* Reset the port */
#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9f9cd36455f..73f8277f88f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -218,6 +218,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define ZTE_PRODUCT_MF628 0x0015
+#define ZTE_PRODUCT_CDMA_TECH 0xfffe
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -347,6 +348,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 706033753ad..ea1a103c99b 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-#define DRIVER_VERSION "v.1.2.13a"
+#define DRIVER_VERSION "v.1.3.2"
#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -30,9 +30,6 @@
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
-#define SWIMS_USB_REQUEST_SetMode 0x0B
-#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
-#define SWIMS_SET_MODE_Modem 0x0001
/* per port private data */
#define N_IN_URB 4
@@ -163,7 +160,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
- { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
+ { USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
@@ -175,6 +172,8 @@ static struct usb_device_id id_table [] = {
/* Sierra Wireless Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
{ USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
+ { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */
+ { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
@@ -187,6 +186,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
{ USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
{ USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */
{ USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
{ USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
{ USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
@@ -204,6 +204,8 @@ static struct usb_device_id id_table [] = {
/* Sierra Wireless Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
{ USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e39c779e416..9a3e495c769 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1744,7 +1744,7 @@ static int ti_download_firmware(struct ti_device *tdev, int type)
if (buffer) {
memcpy(buffer, fw_p->data, fw_p->size);
memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
- ti_do_download(dev, pipe, buffer, fw_p->size);
+ status = ti_do_download(dev, pipe, buffer, fw_p->size);
kfree(buffer);
}
release_firmware(fw_p);
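The ti_usb_3410_5052 hunk only stops discarding the download helper's return code, so a failed firmware transfer is visible to the caller. A compressed sketch of the pattern, with a hypothetical do_download() standing in for ti_do_download():

	#include <linux/slab.h>
	#include <linux/string.h>

	/* do_download() is a hypothetical stand-in for the real transfer routine */
	static int do_download(u8 *buf, size_t len);

	static int download_firmware_sketch(const u8 *data, size_t size)
	{
		int status = -ENOMEM;
		u8 *buffer = kmalloc(size, GFP_KERNEL);

		if (buffer) {
			memcpy(buffer, data, size);
			status = do_download(buffer, size);	/* keep the result */
			kfree(buffer);
		}
		return status;		/* propagate failure instead of ignoring it */
	}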
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index b157c48e8b7..4f7f9e3ae0a 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -733,7 +733,9 @@ int usb_serial_probe(struct usb_interface *interface,
((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) &&
(le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) ||
((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) &&
- (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID))) {
+ (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) ||
+ ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) &&
+ (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) {
if (interface != dev->actconfig->interface[0]) {
/* check out the endpoints of the other interface*/
iface_desc = dev->actconfig->interface[0]->cur_altsetting;
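The usb-serial hunk extends an existing vendor/product check; descriptor fields are little-endian on the wire, so they are converted with le16_to_cpu() before comparing. A hypothetical sketch of such a check (placeholder IDs):

	#include <linux/usb.h>

	#define SKETCH_VENDOR_ID	0x1111	/* placeholder */
	#define SKETCH_PRODUCT_ID	0x2222	/* placeholder */

	static bool is_quirky_device(struct usb_device *dev)
	{
		return le16_to_cpu(dev->descriptor.idVendor) == SKETCH_VENDOR_ID &&
		       le16_to_cpu(dev->descriptor.idProduct) == SKETCH_PRODUCT_ID;
	}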
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index c76034672c1..3d9249632ae 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,18 +146,6 @@ config USB_STORAGE_KARMA
on the resulting scsi device node returns the Karma to normal
operation.
-config USB_STORAGE_SIERRA
- bool "Sierra Wireless TRU-Install Feature Support"
- depends on USB_STORAGE
- help
- Say Y here to include additional code to support Sierra Wireless
- products with the TRU-Install feature (e.g., AC597E, AC881U).
-
- This code switches the Sierra Wireless device from being in
- Mass Storage mode to Modem mode. It also has the ability to
- support host software upgrades should full Linux support be added
- to TRU-Install.
-
config USB_STORAGE_CYPRESS_ATACB
bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index bc3415b475c..7f8beb5366a 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,11 +21,10 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
-usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
- initializers.o $(usb-storage-obj-y)
+ initializers.o sierra_ms.o $(usb-storage-obj-y)
ifneq ($(CONFIG_USB_LIBUSUAL),)
obj-$(CONFIG_USB) += libusual.o
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ba412e68d47..cd155475cb6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -160,6 +160,13 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Filip Joelsson <filip@blueturtle.nu> */
+UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
+ "Nokia",
+ "Nokia 3110c",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Mario Rettig <mariorettig@web.de> */
UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
"Nokia",
@@ -232,6 +239,20 @@ UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Richard Nauber <RichardNauber@web.de> */
+UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601,
+ "Nokia",
+ "6300",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
+/* Patch for Nokia 5310 capacity */
+UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
+ "Nokia",
+ "5310",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
"SMSC",
@@ -987,6 +1008,13 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Adrian Pilchowiec <adi1981@epf.pl> */
+UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000,
+ "RockChip",
+ "MP3",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
+
/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
* This USB MP3/AVI player device fails and disconnects if more than 128
* sectors (64kB) are read/written in a single command, and may be present
@@ -1576,7 +1604,6 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0),
-#ifdef CONFIG_USB_STORAGE_SIERRA
/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
@@ -1587,7 +1614,6 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
"USB MMC Storage",
US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
0),
-#endif
/* Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
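For orientation, each UNUSUAL_DEV() entry above keys on a vendor/product pair plus a bcdDevice (revision) range, then names the device and attaches quirk flags such as US_FL_FIX_CAPACITY or US_FL_MAX_SECTORS_64. A made-up example entry, with placeholder IDs and revision range:

	/* vendor, product, bcdDevice min/max; the strings are informational; the
	 * last arguments are the SCSI subclass/protocol, an optional init hook
	 * and the quirk flags -- every value below is illustrative only */
	UNUSUAL_DEV(  0x1234, 0xabcd, 0x0000, 0x9999,
			"Example Vendor",
			"Example Player",
			US_SC_DEVICE, US_PR_DEVICE, NULL,
			US_FL_FIX_CAPACITY | US_FL_MAX_SECTORS_64 ),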
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 73679aa506d..27016fd2cad 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,9 +102,7 @@
#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
#include "cypress_atacb.h"
#endif
-#ifdef CONFIG_USB_STORAGE_SIERRA
#include "sierra_ms.h"
-#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index c6299e8a041..9cbff84b787 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
if (!fbcon_is_inactive(vc, info)) {
if (ops->blank_state != blank) {
+ int ret = 1;
+
ops->blank_state = blank;
fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
ops->cursor_flash = (!blank);
- if (fb_blank(info, blank))
+ if (info->fbops->fb_blank)
+ ret = info->fbops->fb_blank(blank, info);
+ if (ret)
fbcon_generic_blank(vc, info, blank);
}
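The fbcon_blank() hunk turns hardware blanking into an optional hook with a software fallback: if the driver supplies fb_blank and it returns 0, nothing more is needed; otherwise fbcon blanks in software. A generic sketch of that pattern, with a hypothetical software_blank() fallback:

	#include <linux/fb.h>

	static void software_blank(struct fb_info *info, int blank);	/* hypothetical */

	static void blank_with_fallback(struct fb_info *info, int blank)
	{
		int ret = 1;			/* treat "no hook" as failure */

		if (info->fbops->fb_blank)
			ret = info->fbops->fb_blank(blank, info);
		if (ret)
			software_blank(info, blank);	/* hook absent or failed */
	}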
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index a6e38e9ea73..89a346880ec 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info)
__u32 max_len;
max_len = max(info->var.green.length, info->var.red.length);
max_len = max(info->var.blue.length, max_len);
- return ~(0xfff << (max_len & 0xff));
+ return (~(0xfff << max_len)) & 0xff;
}
static inline int attr_col_ec(int shift, struct vc_data *vc,
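A quick hand check of the corrected mono_col() expression, which is apparently meant to produce an all-ones value max_len bits wide, capped to the low byte:

	/* max_len = 8:   0xfff << 8      = 0x000fff00
	 *                ~0x000fff00     = 0xfff000ff   (32-bit)
	 *                0xfff000ff & 0xff = 0xff        -> full 8-bit channel
	 *
	 * max_len = 5:   0xfff << 5      = 0x0001ffe0
	 *                ~0x0001ffe0     = 0xfffe001f
	 *                0xfffe001f & 0xff = 0x1f        -> 5-bit channel
	 *
	 * The old form, ~(0xfff << (max_len & 0xff)), masked the shift count
	 * rather than the result, so the high bits above were left set.
	 */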
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 614a5c7017b..6799a6de66f 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -130,8 +130,8 @@ static ssize_t geodewdt_write(struct file *file, const char __user *data,
return len;
}
-static int geodewdt_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long geodewdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
@@ -198,7 +198,7 @@ static const struct file_operations geodewdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = geodewdt_write,
- .ioctl = geodewdt_ioctl,
+ .unlocked_ioctl = geodewdt_ioctl,
.open = geodewdt_open,
.release = geodewdt_release,
};
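The geodewdt change above, like the pnx4008, rc32434 and rdc321x hunks further down, follows the same BKL-pushdown recipe: the handler loses its inode argument, returns long, and is registered through .unlocked_ioctl instead of .ioctl. A minimal hypothetical sketch of the converted shape:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/watchdog.h>

	static long sketch_wdt_ioctl(struct file *file, unsigned int cmd,
				     unsigned long arg)
	{
		switch (cmd) {
		case WDIOC_KEEPALIVE:
			/* ... ping the hardware ... */
			return 0;
		default:
			return -ENOTTY;
		}
	}

	static const struct file_operations sketch_wdt_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= sketch_wdt_ioctl,
	};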
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index b82405cfb4c..89fcefcc851 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -85,7 +85,6 @@ static void __asr_toggle(void)
outb(reg & ~asr_toggle_mask, asr_write_addr);
reg = inb(asr_read_addr);
- spin_unlock(&asr_lock);
}
static void asr_toggle(void)
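The ibmasr hunk drops a spin_unlock() that had no matching spin_lock() inside the helper; the double-underscore __asr_toggle() appears to be meant to run with asr_lock already held by its caller. A sketch of that caller-holds-the-lock convention, with hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(sketch_lock);

	/* __sketch_toggle() assumes sketch_lock is held and must not release it */
	static void __sketch_toggle(void)
	{
		/* ... poke the hardware registers ... */
	}

	static void sketch_toggle(void)
	{
		spin_lock(&sketch_lock);
		__sketch_toggle();
		spin_unlock(&sketch_lock);
	}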
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 0ed84162437..6d9f3d4a998 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -173,8 +173,8 @@ static const struct watchdog_info ident = {
.identity = "PNX4008 Watchdog",
};
-static int pnx4008_wdt_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
int ret = -ENOTTY;
int time;
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 6756bcb009e..c9c73b69c5e 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -182,8 +182,8 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data,
return 0;
}
-static int rc32434_wdt_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
int new_timeout;
@@ -242,7 +242,7 @@ static struct file_operations rc32434_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = rc32434_wdt_write,
- .ioctl = rc32434_wdt_ioctl,
+ .unlocked_ioctl = rc32434_wdt_ioctl,
.open = rc32434_wdt_open,
.release = rc32434_wdt_release,
};
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 9108efa73e7..bf92802f2bb 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -144,8 +144,8 @@ static int rdc321x_wdt_release(struct inode *inode, struct file *file)
return 0;
}
-static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
unsigned int value;
@@ -204,7 +204,7 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf,
static const struct file_operations rdc321x_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .ioctl = rdc321x_wdt_ioctl,
+ .unlocked_ioctl = rdc321x_wdt_ioctl,
.open = rdc321x_wdt_open,
.write = rdc321x_wdt_write,
.release = rdc321x_wdt_release,
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index db362c34958..191ea630210 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -115,8 +115,8 @@ static int watchdog_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t watchdog_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static ssize_t watchdog_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
{
/*
* Refresh the timer.
@@ -133,21 +133,22 @@ static const struct watchdog_info ident = {
};
static long watchdog_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
unsigned int new_margin;
+ int __user *int_arg = (int __user *)arg;
int ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = 0;
- if (copy_to_user((void *)arg, &ident, sizeof(ident)))
+ if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
+ ret = put_user(0, int_arg);
break;
case WDIOC_KEEPALIVE:
@@ -156,7 +157,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_SETTIMEOUT:
- ret = get_user(new_margin, (int *)arg);
+ ret = get_user(new_margin, int_arg);
if (ret)
break;
@@ -171,7 +172,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
watchdog_ping();
/* Fall */
case WDIOC_GETTIMEOUT:
- ret = put_user(soft_margin, (int *)arg);
+ ret = put_user(soft_margin, int_arg);
break;
}
return ret;
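The wdt285 hunk mainly tightens user-pointer handling: buffers coming from user space carry the __user annotation and are only touched through copy_to_user()/get_user()/put_user(). A small hypothetical sketch of the same convention:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* hypothetical helper: read a new margin from user space, echo it back */
	static long sketch_set_margin(unsigned long arg, int *margin)
	{
		int __user *p = (int __user *)arg;
		int new_margin;

		if (get_user(new_margin, p))
			return -EFAULT;
		*margin = new_margin;
		return put_user(*margin, p);
	}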